git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'drm-intel-next-fixes-2020-05-20' of git://anongit.freedesktop.org/drm...
author: Dave Airlie <airlied@redhat.com>
Thu, 21 May 2020 00:44:32 +0000 (10:44 +1000)
committer: Dave Airlie <airlied@redhat.com>
Thu, 21 May 2020 00:44:33 +0000 (10:44 +1000)
Fix for TypeC power domain toggling on resets (Cc: stable).
Two compile time warning fixes.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200520123227.GA21104@jlahtine-desk.ger.corp.intel.com
1185 files changed:
Documentation/admin-guide/sysctl/kernel.rst
Documentation/core-api/timekeeping.rst
Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
Documentation/devicetree/bindings/display/bridge/anx6345.yaml
Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/ps8640.yaml
Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt [deleted file]
Documentation/devicetree/bindings/display/dsi-controller.yaml
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt [deleted file]
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt [deleted file]
Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
Documentation/devicetree/bindings/display/panel/display-timings.yaml
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt [deleted file]
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt [deleted file]
Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt [deleted file]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt [deleted file]
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt [deleted file]
Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt [deleted file]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt [deleted file]
Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,lg4573.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lvds.yaml
Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt [deleted file]
Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt [deleted file]
Documentation/devicetree/bindings/display/panel/panel-common.yaml
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt [deleted file]
Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt [deleted file]
Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/simple-panel.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt [deleted file]
Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/tpo,td.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
Documentation/devicetree/bindings/display/renesas,du.txt
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt [deleted file]
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt [deleted file]
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
Documentation/devicetree/bindings/hwmon/adt7475.yaml
Documentation/devicetree/bindings/iio/dac/ad5755.txt
Documentation/devicetree/bindings/iio/dac/adi,ad5770r.yaml
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml
Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml
Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
Documentation/devicetree/bindings/net/ethernet-phy.yaml
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
Documentation/devicetree/bindings/usb/ingenic,musb.yaml
Documentation/devicetree/bindings/usb/qcom,dwc3.txt
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/dma-buf.rst
Documentation/gpu/amdgpu.rst
Documentation/gpu/drm-internals.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/todo.rst
Documentation/hwmon/isl68137.rst
Documentation/networking/devlink/devlink-trap.rst
Documentation/networking/index.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/net_dim.rst [new file with mode: 0644]
Documentation/networking/net_dim.txt [deleted file]
Documentation/x86/boot.rst
MAINTAINERS
Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6qp.dtsi
arch/arm/mach-sa1100/shannon.c
arch/arm/net/bpf_jit_32.c
arch/arm/xen/enlighten.c
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/vdso.c
arch/m68k/include/asm/Kbuild
arch/riscv/Kconfig
arch/riscv/net/bpf_jit_comp64.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/efi.h
arch/x86/include/asm/microcode_amd.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/umip.c
arch/x86/platform/efi/efi_64.c
block/blk-mq.c
block/blk-wbt.c
drivers/ata/ahci.c
drivers/block/rbd.c
drivers/clk/clk-asm9260.c
drivers/clk/mmp/clk-pll.c
drivers/clk/mmp/clk.c
drivers/clk/mmp/clk.h
drivers/clk/sprd/sc9863a-clk.c
drivers/dma-buf/Makefile
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-fence-chain.c
drivers/dma-buf/dma-fence.c
drivers/dma-buf/selftests.h
drivers/dma-buf/st-dma-fence-chain.c [new file with mode: 0644]
drivers/firmware/efi/cper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c [changed mode: 0644->0755]
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c [changed mode: 0644->0755]
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/nvd.h
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/soc15d.h
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdgpu/vid.h
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/dc/basics/Makefile
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c [deleted file]
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_sink.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
drivers/gpu/drm/amd/display/dmub/dmub_srv.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h [deleted file]
drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/hdcp_types.h
drivers/gpu/drm/amd/display/include/logger_interface.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/display/modules/stats/stats.c [deleted file]
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
drivers/gpu/drm/amd/powerplay/inc/smu_types.h
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/smu_internal.h
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smu_v12_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/aspeed/aspeed_gfx.h
drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/aspeed/aspeed_gfx_out.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/adv7511/Kconfig
drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
drivers/gpu/drm/bridge/chrontel-ch7033.c [new file with mode: 0644]
drivers/gpu/drm/bridge/nwl-dsi.c [new file with mode: 0644]
drivers/gpu/drm/bridge/nwl-dsi.h [new file with mode: 0644]
drivers/gpu/drm/bridge/panel.c
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/bridge/sii9234.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358768.c
drivers/gpu/drm/cirrus/Kconfig [deleted file]
drivers/gpu/drm/cirrus/Makefile [deleted file]
drivers/gpu/drm/cirrus/cirrus.c [deleted file]
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_blend.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_dma.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_managed.c [new file with mode: 0644]
drivers/gpu/drm/drm_mipi_dbi.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_vram_helper_common.c [deleted file]
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/gma500/cdv_intel_crt.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/mdfld_output.h
drivers/gpu/drm/gma500/mdfld_tmd_vid.c
drivers/gpu/drm/gma500/mdfld_tpo_vid.c
drivers/gpu/drm/gma500/oaktrail_hdmi.c
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
drivers/gpu/drm/i2c/sil164_drv.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_debugfs.h
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_debugfs.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/ingenic/ingenic-drm.c
drivers/gpu/drm/lima/Kconfig
drivers/gpu/drm/lima/Makefile
drivers/gpu/drm/lima/lima_bcast.c
drivers/gpu/drm/lima/lima_bcast.h
drivers/gpu/drm/lima/lima_ctx.c
drivers/gpu/drm/lima/lima_ctx.h
drivers/gpu/drm/lima/lima_devfreq.c [new file with mode: 0644]
drivers/gpu/drm/lima/lima_devfreq.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_device.c
drivers/gpu/drm/lima/lima_device.h
drivers/gpu/drm/lima/lima_dlbu.c
drivers/gpu/drm/lima/lima_dlbu.h
drivers/gpu/drm/lima/lima_drv.c
drivers/gpu/drm/lima/lima_drv.h
drivers/gpu/drm/lima/lima_dump.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_gp.c
drivers/gpu/drm/lima/lima_gp.h
drivers/gpu/drm/lima/lima_l2_cache.c
drivers/gpu/drm/lima/lima_l2_cache.h
drivers/gpu/drm/lima/lima_mmu.c
drivers/gpu/drm/lima/lima_mmu.h
drivers/gpu/drm/lima/lima_pmu.c
drivers/gpu/drm/lima/lima_pmu.h
drivers/gpu/drm/lima/lima_pp.c
drivers/gpu/drm/lima/lima_pp.h
drivers/gpu/drm/lima/lima_sched.c
drivers/gpu/drm/lima/lima_sched.h
drivers/gpu/drm/lima/lima_trace.c [new file with mode: 0644]
drivers/gpu/drm/lima/lima_trace.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_vm.h
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/mcde/mcde_drm.h
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_drv.h
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_vclk.c
drivers/gpu/drm/meson/meson_vclk.h
drivers/gpu/drm/mgag200/mgag200_cursor.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_i2c.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_debugfs.h
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_debugfs.h
drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
drivers/gpu/drm/panel/panel-novatek-nt39016.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-truly-nt35597.c
drivers/gpu/drm/panel/panel-visionox-rm69299.c [new file with mode: 0644]
drivers/gpu/drm/pl111/Makefile
drivers/gpu/drm/pl111/pl111_debugfs.c
drivers/gpu/drm/pl111/pl111_drm.h
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/pl111/pl111_versatile.c
drivers/gpu/drm/pl111/pl111_vexpress.c [deleted file]
drivers/gpu/drm/pl111/pl111_vexpress.h [deleted file]
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_dumb.c
drivers/gpu/drm/qxl/qxl_gem.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/r128/ati_pcigart.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/cdn-dp-reg.c
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rk3066_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/rockchip/rockchip_rgb.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/shmobile/shmob_drm_kms.c
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sti/sti_compositor.h
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_mixer.c
drivers/gpu/drm/sti/sti_mixer.h
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/sti/sti_vid.c
drivers/gpu/drm/sti/sti_vid.h
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_mixer.h
drivers/gpu/drm/sun4i/sun8i_ui_layer.c
drivers/gpu/drm/sun4i/sun8i_vi_layer.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tidss/tidss_crtc.c
drivers/gpu/drm/tidss/tidss_dispc.c
drivers/gpu/drm/tidss/tidss_dispc.h
drivers/gpu/drm/tidss/tidss_drv.c
drivers/gpu/drm/tidss/tidss_drv.h
drivers/gpu/drm/tidss/tidss_encoder.c
drivers/gpu/drm/tidss/tidss_irq.c
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/tidss/tidss_kms.h
drivers/gpu/drm/tidss/tidss_plane.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_external.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tiny/Kconfig
drivers/gpu/drm/tiny/Makefile
drivers/gpu/drm/tiny/cirrus.c [new file with mode: 0644]
drivers/gpu/drm/tiny/gm12u320.c
drivers/gpu/drm/tiny/hx8357d.c
drivers/gpu/drm/tiny/ili9225.c
drivers/gpu/drm/tiny/ili9341.c
drivers/gpu/drm/tiny/ili9486.c
drivers/gpu/drm/tiny/mi0283qt.c
drivers/gpu/drm/tiny/repaper.c
drivers/gpu/drm/tiny/st7586.c
drivers/gpu/drm/tiny/st7735r.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/v3d/v3d_debugfs.c
drivers/gpu/drm/v3d/v3d_drv.c
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/v3d/v3d_mmu.c
drivers/gpu/drm/v3d/v3d_sched.c
drivers/gpu/drm/vboxvideo/vbox_drv.c
drivers/gpu/drm/vboxvideo/vbox_drv.h
drivers/gpu/drm/vboxvideo/vbox_irq.c
drivers/gpu/drm/vboxvideo/vbox_main.c
drivers/gpu/drm/vboxvideo/vbox_mode.c
drivers/gpu/drm/vboxvideo/vbox_ttm.c
drivers/gpu/drm/vc4/vc4_debugfs.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_debugfs.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_gem.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_drv.h
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/drm/vkms/vkms_output.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/zte/zx_hdmi.c
drivers/gpu/drm/zte/zx_tvenc.c
drivers/gpu/drm/zte/zx_vga.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_debugfs.c
drivers/hv/hyperv_vmbus.h
drivers/hv/vmbus_drv.c
drivers/hwmon/Kconfig
drivers/hwmon/drivetemp.c
drivers/hwmon/jc42.c
drivers/hwmon/k10temp.c
drivers/hwmon/pmbus/isl68137.c
drivers/i2c/busses/i2c-altera.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/irqchip/irq-bcm7038-l1.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-meson-gpio.c
drivers/irqchip/irq-mvebu-icu.c
drivers/irqchip/irq-sifive-plic.c
drivers/irqchip/irq-ti-sci-inta.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/mtd/spi-nor/Makefile
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/ocelot/felix.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ipa/ipa_modem.c
drivers/net/macsec.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/tun.c
drivers/net/wireless/ath/ath11k/thermal.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtw88/pci.c
drivers/of/overlay.c
drivers/of/unittest-data/overlay_bad_add_dup_prop.dts
drivers/of/unittest.c
drivers/opp/core.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/s390/block/Kconfig
drivers/scsi/hisi_sas/Kconfig
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/sg.c
drivers/target/target_core_fabric_lib.c
drivers/target/target_core_user.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/arcfb.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/controlfb.c
drivers/video/fbdev/core/fbmon.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/i810/i810_main.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/matrox/g450_pll.c
drivers/video/fbdev/matrox/matroxfb_base.h
drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
drivers/video/fbdev/mx3fb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/dss/dispc.c
drivers/video/fbdev/omap2/omapfb/dss/dss.h
drivers/video/fbdev/omap2/omapfb/dss/venc.c
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pm3fb.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/riva/riva_hw.c
drivers/video/fbdev/s1d13xxxfb.c
drivers/video/fbdev/sa1100fb.c
drivers/video/fbdev/sa1100fb.h
drivers/video/fbdev/savage/savagefb.h
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/uvesafb.c
drivers/video/fbdev/valkyriefb.c
drivers/video/fbdev/vesafb.c
drivers/video/fbdev/via/debug.h
drivers/video/fbdev/via/viafbdev.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/video/fbdev/w100fb.c
drivers/watchdog/sp805_wdt.c
drivers/xen/xenbus/xenbus_client.c
fs/afs/dir.c
fs/afs/dir_silly.c
fs/afs/fsclient.c
fs/afs/yfsclient.c
fs/btrfs/block-group.c
fs/btrfs/file.c
fs/btrfs/reflink.c
fs/btrfs/relocation.c
fs/btrfs/space-info.c
fs/btrfs/tree-log.c
fs/buffer.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/mds_client.h
fs/cifs/cifssmb.c
fs/cifs/inode.c
fs/cifs/smb2pdu.c
fs/cifs/smb2transport.c
fs/ext4/balloc.c
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/io_uring.c
fs/nfs/pnfs.c
fs/proc/base.c
fs/proc/root.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
include/asm-generic/mshyperv.h
include/drm/drm_client.h
include/drm/drm_connector.h
include/drm/drm_debugfs.h
include/drm/drm_device.h
include/drm/drm_displayid.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_drv.h
include/drm/drm_encoder.h
include/drm/drm_fb_helper.h
include/drm/drm_file.h
include/drm/drm_framebuffer.h
include/drm/drm_gem_framebuffer_helper.h
include/drm/drm_gem_vram_helper.h
include/drm/drm_legacy.h
include/drm/drm_managed.h [new file with mode: 0644]
include/drm/drm_mipi_dbi.h
include/drm/drm_mm.h
include/drm/drm_mode_config.h
include/drm/drm_modes.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_pci.h [deleted file]
include/drm/drm_print.h
include/drm/drm_writeback.h
include/drm/gpu_scheduler.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_debug.h [deleted file]
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/buffer_head.h
include/linux/can/dev/peak_canfd.h
include/linux/cpu_rmap.h
include/linux/digsig.h
include/linux/dirent.h
include/linux/dma-buf.h
include/linux/enclosure.h
include/linux/energy_model.h
include/linux/ethtool.h
include/linux/genalloc.h
include/linux/i2c.h
include/linux/igmp.h
include/linux/ihex.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/list_lru.h
include/linux/memcontrol.h
include/linux/platform_data/wilco-ec.h
include/linux/posix_acl.h
include/linux/rio.h
include/linux/rslib.h
include/linux/sched/topology.h
include/linux/skbuff.h
include/linux/swap.h
include/linux/ti_wilink_st.h
include/linux/tpm_eventlog.h
include/linux/xattr.h
include/net/cfg80211.h
include/net/ip6_route.h
include/net/netfilter/nf_tables.h
include/net/sock.h
include/soc/mscc/ocelot.h
include/sound/hda_codec.h
include/trace/events/wbt.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm_fourcc.h
include/uapi/linux/btrfs.h
include/uapi/linux/dlm_device.h
include/uapi/linux/fiemap.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/xt_IDLETIMER.h
kernel/bpf/bpf_lru_list.h
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/fork.c
kernel/irq/manage.c
kernel/rcu/tree.c
kernel/sched/cputime.c
kernel/sched/isolation.c
kernel/signal.c
kernel/time/namespace.c
kernel/trace/trace_events_trigger.c
lib/Kconfig.debug
mm/mremap.c
mm/slob.c
mm/slub.c
net/core/dev.c
net/core/filter.c
net/core/net-sysfs.c
net/core/sock.c
net/dsa/port.c
net/hsr/hsr_netlink.c
net/ipv4/devinet.c
net/ipv6/icmp.c
net/ipv6/seg6.c
net/l2tp/l2tp_netlink.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mptcp/protocol.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_lookup.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_IDLETIMER.c
net/qrtr/qrtr.c
net/rds/message.c
net/rds/rdma.c
net/rds/rds.h
net/rds/send.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/sched/cls_api.c
net/tipc/link.c
net/tls/tls_main.c
net/wireless/nl80211.c
net/xdp/xdp_umem.c
net/xdp/xsk.c
scripts/documentation-file-ref-check
scripts/dtc/Makefile
scripts/kernel-doc
security/keys/proc.c
security/selinux/ss/policydb.c
sound/hda/Kconfig
sound/pci/ctxfi/cthw20k1.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/bpf/bpftool/struct_ops.c
tools/include/linux/bits.h
tools/include/linux/build_bug.h [new file with mode: 0644]
tools/include/linux/compiler.h
tools/include/linux/const.h
tools/include/linux/kernel.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fscrypt.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/mman.h
tools/include/uapi/linux/sched.h
tools/include/uapi/linux/vhost.h
tools/include/vdso/bits.h [new file with mode: 0644]
tools/include/vdso/const.h [new file with mode: 0644]
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/netlink.c
tools/objtool/check.c
tools/objtool/orc_dump.c
tools/objtool/orc_gen.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/check-headers.sh
tools/perf/trace/beauty/clone.c
tools/perf/trace/beauty/mmap.c
tools/perf/util/setup.py
tools/perf/util/stat-shadow.c
tools/testing/selftests/bpf/prog_tests/mmap.c
tools/testing/selftests/bpf/prog_tests/section_names.c
tools/testing/selftests/bpf/prog_tests/test_lsm.c
tools/testing/selftests/bpf/prog_tests/xdp_attach.c
tools/testing/selftests/bpf/prog_tests/xdp_info.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/lsm.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/tc-testing/tdc.py

index 39c95c0e13d30d656b213bfe6715332bf5a269a6..0d427fd1094194204d0fed72caf4b468e2d62017 100644 (file)
@@ -390,9 +390,17 @@ When ``kptr_restrict`` is set to 2, kernel pointers printed using
 modprobe
 ========
 
-This gives the full path of the modprobe command which the kernel will
-use to load modules. This can be used to debug module loading
-requests::
+The full path to the usermode helper for autoloading kernel modules,
+by default "/sbin/modprobe".  This binary is executed when the kernel
+requests a module.  For example, if userspace passes an unknown
+filesystem type to mount(), then the kernel will automatically request
+the corresponding filesystem module by executing this usermode helper.
+This usermode helper should insert the needed module into the kernel.
+
+This sysctl only affects module autoloading.  It has no effect on the
+ability to explicitly insert modules.
+
+This sysctl can be used to debug module loading requests::
 
     echo '#! /bin/sh' > /tmp/modprobe
     echo 'echo "$@" >> /tmp/modprobe.log' >> /tmp/modprobe
@@ -400,10 +408,15 @@ requests::
     chmod a+x /tmp/modprobe
     echo /tmp/modprobe > /proc/sys/kernel/modprobe
 
-This only applies when the *kernel* is requesting that the module be
-loaded; it won't have any effect if the module is being loaded
-explicitly using ``modprobe`` from userspace.
+Alternatively, if this sysctl is set to the empty string, then module
+autoloading is completely disabled.  The kernel will not try to
+execute a usermode helper at all, nor will it call the
+kernel_module_request LSM hook.
 
+If CONFIG_STATIC_USERMODEHELPER=y is set in the kernel configuration,
+then the configured static usermode helper overrides this sysctl,
+except that the empty string is still accepted to completely disable
+module autoloading as described above.
 
 modules_disabled
 ================
@@ -446,28 +459,6 @@ Notes:
      successful IPC object allocation. If an IPC object allocation syscall
      fails, it is undefined if the value remains unmodified or is reset to -1.
 
-modprobe:
-=========
-
-The path to the usermode helper for autoloading kernel modules, by
-default "/sbin/modprobe".  This binary is executed when the kernel
-requests a module.  For example, if userspace passes an unknown
-filesystem type to mount(), then the kernel will automatically request
-the corresponding filesystem module by executing this usermode helper.
-This usermode helper should insert the needed module into the kernel.
-
-This sysctl only affects module autoloading.  It has no effect on the
-ability to explicitly insert modules.
-
-If this sysctl is set to the empty string, then module autoloading is
-completely disabled.  The kernel will not try to execute a usermode
-helper at all, nor will it call the kernel_module_request LSM hook.
-
-If CONFIG_STATIC_USERMODEHELPER=y is set in the kernel configuration,
-then the configured static usermode helper overrides this sysctl,
-except that the empty string is still accepted to completely disable
-module autoloading as described above.
-
 nmi_watchdog
 ============
 
index c0ffa30c7c37e3060731645a55bf01922b5ca93f..729e24864fe738a3f5bd79a62d56d9e7c4542c85 100644 (file)
@@ -154,9 +154,9 @@ architectures. These are the recommended replacements:
 
        Use ktime_get() or ktime_get_ts64() instead.
 
-.. c:function:: struct timeval do_gettimeofday( void )
-               struct timespec getnstimeofday( void )
-               struct timespec64 getnstimeofday64( void )
+.. c:function:: void do_gettimeofday( struct timeval * )
+               void getnstimeofday( struct timespec * )
+               void getnstimeofday64( struct timespec64 * )
                void ktime_get_real_ts( struct timespec * )
 
        ktime_get_real_ts64() is a direct replacement, but consider using
index aa0738b4d53401dc84afc4cd8e5bb88e724acd8f..e713a6fe4cf7da2e8aed749ea9cade99d2c3099e 100644 (file)
@@ -42,6 +42,10 @@ properties:
     description:
       See section 2.3.9 of the DeviceTree Specification.
 
+  '#address-cells': true
+
+  '#size-cells': true
+
 required:
   - "#interconnect-cells"
   - compatible
@@ -59,6 +63,8 @@ examples:
         compatible = "allwinner,sun5i-a13-mbus";
         reg = <0x01c01000 0x1000>;
         clocks = <&ccu CLK_MBUS>;
+        #address-cells = <1>;
+        #size-cells = <1>;
         dma-ranges = <0x00000000 0x40000000 0x20000000>;
         #interconnect-cells = <1>;
     };
index de9a465096dbd887bac9c2829d1fe4d8855f311d..444aeea27db83c3cadceae5a7ba877569e3ad1f8 100644 (file)
@@ -91,7 +91,7 @@ required:
 
 examples:
   - |
-    vco1: clock@00 {
+    vco1: clock {
       compatible = "arm,impd1-vco1";
       #clock-cells = <0>;
       lock-offset = <0x08>;
index 9e90c2b009609b1c3603d3113c8cc3c9dd446efa..e73662c8d339c13dac09ba82b20d4ac3486cf17b 100644 (file)
@@ -119,7 +119,7 @@ examples:
         panel@0 {
                 compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
                 reg = <0>;
-                power-gpios = <&pio 1 7 0>; /* PB07 */
+                power-supply = <&reg_display>;
                 reset-gpios = <&r_pio 0 5 1>; /* PL05 */
                 backlight = <&pwm_bl>;
         };
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt
deleted file mode 100644 (file)
index a6b2b2b..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Analog Device ADV7123 Video DAC
--------------------------------
-
-The ADV7123 is a digital-to-analog converter that outputs VGA signals from a
-parallel video input.
-
-Required properties:
-
-- compatible: Should be "adi,adv7123"
-
-Optional properties:
-
-- psave-gpios: Power save control GPIO
-
-Required nodes:
-
-The ADV7123 has two video ports. Their connections are modeled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for DPI input
-- Video port 1 for VGA output
-
-
-Example
--------
-
-       adv7123: encoder@0 {
-               compatible = "adi,adv7123";
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-
-                               adv7123_in: endpoint@0 {
-                                       remote-endpoint = <&dpi_out>;
-                               };
-                       };
-
-                       port@1 {
-                               reg = <1>;
-
-                               adv7123_out: endpoint@0 {
-                                       remote-endpoint = <&vga_connector_in>;
-                               };
-                       };
-               };
-       };
index e8ddec5d9d910a5a1d2d68684ffc9b4f4a822261..659523f538bfc0eee3fcf88a346c2ef08d40b039 100644 (file)
@@ -1,5 +1,5 @@
-Analog Device ADV7511(W)/13/33/35 HDMI Encoders
------------------------------------------
+Analog Devices ADV7511(W)/13/33/35 HDMI Encoders
+------------------------------------------------
 
 The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
 transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
index c211038699233b943ad3e10e93215ef30be40434..8c0e4f285fbcb9867dea7e5f4e17743e862903b4 100644 (file)
@@ -37,6 +37,12 @@ properties:
     type: object
 
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -51,6 +57,8 @@ properties:
     required:
       - port@0
 
+    additionalProperties: false
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
new file mode 100644 (file)
index 0000000..9f38f55
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/chrontel,ch7033.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Chrontel CH7033 Video Encoder Device Tree Bindings
+
+maintainers:
+  - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+  compatible:
+    const: chrontel,ch7033
+
+  reg:
+    maxItems: 1
+    description: I2C address of the device
+
+  ports:
+    type: object
+
+    properties:
+      port@0:
+        type: object
+        description: |
+          Video port for RGB input.
+
+      port@1:
+        type: object
+        description: |
+          DVI port, should be connected to a node compatible with the
+          dvi-connector binding.
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - compatible
+  - reg
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        vga-dvi-encoder@76 {
+            compatible = "chrontel,ch7033";
+            reg = <0x76>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    endpoint {
+                        remote-endpoint = <&lcd0_rgb_out>;
+                    };
+                };
+
+                port@1 {
+                    reg = <1>;
+                    endpoint {
+                        remote-endpoint = <&dvi_in>;
+                    };
+                };
+
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
deleted file mode 100644 (file)
index 164cbb1..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Dumb RGB to VGA DAC bridge
----------------------------
-
-This binding is aimed for dumb RGB to VGA DAC based bridges that do not require
-any configuration.
-
-Required properties:
-
-- compatible: Must be "dumb-vga-dac"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Optional properties:
-- vdd-supply: Power supply for DAC
-
-Example
--------
-
-bridge {
-       compatible = "dumb-vga-dac";
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       ports {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               port@0 {
-                       reg = <0>;
-
-                       vga_bridge_in: endpoint {
-                               remote-endpoint = <&tcon0_out_vga>;
-                       };
-               };
-
-               port@1 {
-                       reg = <1>;
-
-                       vga_bridge_out: endpoint {
-                               remote-endpoint = <&vga_con_in>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt b/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
deleted file mode 100644 (file)
index b13adf3..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Synopsys DesignWare MIPI DSI host controller
-============================================
-
-This document defines device tree properties for the Synopsys DesignWare MIPI
-DSI host controller. It doesn't constitue a device tree binding specification
-by itself but is meant to be referenced by platform-specific device tree
-bindings.
-
-When referenced from platform device tree bindings the properties defined in
-this document are defined as follows. The platform device tree bindings are
-responsible for defining whether each optional property is used or not.
-
-- reg: Memory mapped base address and length of the DesignWare MIPI DSI
-  host controller registers. (mandatory)
-
-- clocks: References to all the clocks specified in the clock-names property
-  as specified in [1]. (mandatory)
-
-- clock-names:
-  - "pclk" is the peripheral clock for either AHB and APB. (mandatory)
-  - "px_clk" is the pixel clock for the DPI/RGB input. (optional)
-
-- resets: References to all the resets specified in the reset-names property
-  as specified in [2]. (optional)
-
-- reset-names: string reset name, must be "apb" if used. (optional)
-
-- panel or bridge node: see [3]. (mandatory)
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/reset/reset.txt
-[3] Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
new file mode 100644 (file)
index 0000000..2c50016
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ite,it6505.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ITE it6505 Device Tree Bindings
+
+maintainers:
+  - Allen Chen <allen.chen@ite.com.tw>
+
+description: |
+  The IT6505 is a high-performance DisplayPort 1.1a transmitter,
+  fully compliant with DisplayPort 1.1a, HDCP 1.3 specifications.
+  The IT6505 supports color depth of up to 36 bits (12 bits/color)
+  and ensures robust transmission of high-quality uncompressed video
+  content, along with uncompressed and compressed digital audio content.
+
+  Aside from the various video output formats supported, the IT6505
+  also encodes and transmits up to 8 channels of I2S digital audio,
+  with sampling rate up to 192kHz and sample size up to 24 bits.
+  In addition, an S/PDIF input port takes in compressed audio of up to
+  192kHz frame rate.
+
+  Each IT6505 chip comes preprogrammed with a unique HDCP key,
+  in compliance with the HDCP 1.3 standard so as to provide secure
+  transmission of high-definition content. Users of the IT6505 need not
+  purchase any HDCP keys or ROMs.
+
+properties:
+  compatible:
+    const: ite,it6505
+
+  ovdd-supply:
+    maxItems: 1
+    description: I/O voltage
+
+  pwr18-supply:
+    maxItems: 1
+    description: core voltage
+
+  interrupts:
+    maxItems: 1
+    description: interrupt specifier of INT pin
+
+  reset-gpios:
+    maxItems: 1
+    description: gpio specifier of RESET pin
+
+  extcon:
+    maxItems: 1
+    description: extcon specifier for the Power Delivery
+
+  port:
+    type: object
+    description: A port node pointing to DPI host port node
+
+required:
+  - compatible
+  - ovdd-supply
+  - pwr18-supply
+  - interrupts
+  - reset-gpios
+  - extcon
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        dp-bridge@5c {
+            compatible = "ite,it6505";
+            interrupts = <152 IRQ_TYPE_EDGE_FALLING 152 0>;
+            reg = <0x5c>;
+            pinctrl-names = "default";
+            pinctrl-0 = <&it6505_pins>;
+            ovdd-supply = <&mt6358_vsim1_reg>;
+            pwr18-supply = <&it6505_pp18_reg>;
+            reset-gpios = <&pio 179 1>;
+            extcon = <&usbc_extcon>;
+
+            port {
+                it6505_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
index 8f373029f5d217a07b2c0b37e988ea4cef559b92..800c63764e715f8711fca413aff75db7bbe1abae 100644 (file)
@@ -50,6 +50,12 @@ properties:
       This device has two video ports. Their connections are modeled using the
       OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -66,6 +72,8 @@ properties:
       - port@0
       - port@1
 
+    additionalProperties: false
+
   powerdown-gpios:
     description:
       The GPIO used to control the power down line of this device.
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
new file mode 100644 (file)
index 0000000..8aff2d6
--- /dev/null
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/nwl-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Northwest Logic MIPI-DSI controller on i.MX SoCs
+
+maintainers:
+  - Guido Gúnther <agx@sigxcpu.org>
+  - Robert Chiras <robert.chiras@nxp.com>
+
+description: |
+  NWL MIPI-DSI host controller found on i.MX8 platforms. This is a DSI
+  bridge for the SoC's NWL MIPI-DSI host controller.
+
+properties:
+  compatible:
+    const: fsl,imx8mq-nwl-dsi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 0
+
+  clocks:
+    items:
+      - description: DSI core clock
+      - description: RX_ESC clock (used in escape mode)
+      - description: TX_ESC clock (used in escape mode)
+      - description: PHY_REF clock
+      - description: LCDIF clock
+
+  clock-names:
+    items:
+      - const: core
+      - const: rx_esc
+      - const: tx_esc
+      - const: phy_ref
+      - const: lcdif
+
+  mux-controls:
+    description:
+      mux controller node to use for operating the input mux
+
+  phys:
+    maxItems: 1
+    description:
+      A phandle to the phy module representing the DPHY
+
+  phy-names:
+    items:
+      - const: dphy
+
+  power-domains:
+    maxItems: 1
+
+  resets:
+    items:
+      - description: dsi byte reset line
+      - description: dsi dpi reset line
+      - description: dsi esc reset line
+      - description: dsi pclk reset line
+
+  reset-names:
+    items:
+      - const: byte
+      - const: dpi
+      - const: esc
+      - const: pclk
+
+  ports:
+    type: object
+    description:
+      A node containing DSI input & output port nodes with endpoint
+      definitions as documented in
+      Documentation/devicetree/bindings/graph.txt.
+    properties:
+      port@0:
+        type: object
+        description:
+          Input port node to receive pixel data from the
+          display controller. Exactly one endpoint must be
+          specified.
+        properties:
+          '#address-cells':
+            const: 1
+
+          '#size-cells':
+            const: 0
+
+          endpoint@0:
+            description: sub-node describing the input from LCDIF
+            type: object
+
+          endpoint@1:
+            description: sub-node describing the input from DCSS
+            type: object
+
+          reg:
+            const: 0
+
+        required:
+          - '#address-cells'
+          - '#size-cells'
+          - reg
+
+        oneOf:
+          - required:
+              - endpoint@0
+          - required:
+              - endpoint@1
+
+        additionalProperties: false
+
+      port@1:
+        type: object
+        description:
+          DSI output port node to the panel or the next bridge
+          in the chain
+
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+    required:
+      - '#address-cells'
+      - '#size-cells'
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+patternProperties:
+  "^panel@[0-9]+$":
+    type: object
+
+required:
+  - '#address-cells'
+  - '#size-cells'
+  - clock-names
+  - clocks
+  - compatible
+  - interrupts
+  - mux-controls
+  - phy-names
+  - phys
+  - ports
+  - reg
+  - reset-names
+  - resets
+
+additionalProperties: false
+
+examples:
+ - |
+
+   #include <dt-bindings/clock/imx8mq-clock.h>
+   #include <dt-bindings/interrupt-controller/arm-gic.h>
+   #include <dt-bindings/reset/imx8mq-reset.h>
+
+   mipi_dsi: mipi_dsi@30a00000 {
+              #address-cells = <1>;
+              #size-cells = <0>;
+              compatible = "fsl,imx8mq-nwl-dsi";
+              reg = <0x30A00000 0x300>;
+              clocks = <&clk IMX8MQ_CLK_DSI_CORE>,
+                       <&clk IMX8MQ_CLK_DSI_AHB>,
+                       <&clk IMX8MQ_CLK_DSI_IPG_DIV>,
+                       <&clk IMX8MQ_CLK_DSI_PHY_REF>,
+                       <&clk IMX8MQ_CLK_LCDIF_PIXEL>;
+              clock-names = "core", "rx_esc", "tx_esc", "phy_ref", "lcdif";
+              interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+              mux-controls = <&mux 0>;
+              power-domains = <&pgc_mipi>;
+              resets = <&src IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N>;
+              reset-names = "byte", "dpi", "esc", "pclk";
+              phys = <&dphy>;
+              phy-names = "dphy";
+
+              panel@0 {
+                      #address-cells = <1>;
+                      #size-cells = <0>;
+                      compatible = "rocktech,jh057n00900";
+                      reg = <0>;
+                      port@0 {
+                           reg = <0>;
+                           panel_in: endpoint {
+                                     remote-endpoint = <&mipi_dsi_out>;
+                           };
+                      };
+              };
+
+              ports {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+
+                    port@0 {
+                           #size-cells = <0>;
+                           #address-cells = <1>;
+                           reg = <0>;
+                           mipi_dsi_in: endpoint@0 {
+                                        reg = <0>;
+                                        remote-endpoint = <&lcdif_mipi_dsi>;
+                           };
+                    };
+                    port@1 {
+                           reg = <1>;
+                           mipi_dsi_out: endpoint {
+                                         remote-endpoint = <&panel_in>;
+                           };
+                    };
+              };
+      };
index 5dff93641bea0d8ce384ce253fbbed78027f7a20..7e27cfcf770dd802c80805293b9c8aba93d674d8 100644 (file)
@@ -50,6 +50,12 @@ properties:
       Documentation/devicetree/bindings/media/video-interfaces.txt
       Documentation/devicetree/bindings/graph.txt
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -63,6 +69,8 @@ properties:
     required:
       - port@0
 
+    additionalProperties: false
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
new file mode 100644 (file)
index 0000000..0880cbf
--- /dev/null
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/simple-bridge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Transparent non-programmable DRM bridges
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  This binding supports transparent non-programmable bridges that don't require
+  any configuration, with a single input and a single output.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - ti,ths8134a
+          - ti,ths8134b
+        - const: ti,ths8134
+      - enum:
+        - adi,adv7123
+        - dumb-vga-dac
+        - ti,opa362
+        - ti,ths8134
+        - ti,ths8135
+
+  ports:
+    type: object
+    description: |
+      This device has two video ports. Their connections are modeled using the
+      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+    properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+      port@0:
+        type: object
+        description: The bridge input
+
+      port@1:
+        type: object
+        description: The bridge output
+
+    required:
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+  enable-gpios:
+    maxItems: 1
+    description: GPIO controlling bridge enable
+
+  vdd-supply:
+    maxItems: 1
+    description: Power supply for the bridge
+
+required:
+  - compatible
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    bridge {
+        compatible = "ti,ths8134a", "ti,ths8134";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                vga_bridge_in: endpoint {
+                    remote-endpoint = <&tcon0_out_vga>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+
+                vga_bridge_out: endpoint {
+                    remote-endpoint = <&vga_con_in>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
new file mode 100644 (file)
index 0000000..012aa8e
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/snps,dw-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare MIPI DSI host controller
+
+maintainers:
+  - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+  This document defines device tree properties for the Synopsys DesignWare MIPI
+  DSI host controller. It doesn't constitute a device tree binding specification
+  by itself but is meant to be referenced by platform-specific device tree
+  bindings.
+
+  When referenced from platform device tree bindings the properties defined in
+  this document are defined as follows. The platform device tree bindings are
+  responsible for defining whether each property is required or optional.
+
+allOf:
+  - $ref: ../dsi-controller.yaml#
+
+properties:
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Module clock
+      - description: DSI bus clock for either AHB and APB
+      - description: Pixel clock for the DPI/RGB input
+    minItems: 2
+
+  clock-names:
+    items:
+      - const: ref
+      - const: pclk
+      - const: px_clk
+    minItems: 2
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: apb
+
+  ports:
+    type: object
+
+    properties:
+      port@0:
+        type: object
+        description: Input node to receive pixel data.
+      port@1:
+        type: object
+        description: DSI output node to panel.
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - clock-names
+  - clocks
+  - ports
+  - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
deleted file mode 100644 (file)
index d17d1e5..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-Thine Electronics THC63LVD1024 LVDS decoder
--------------------------------------------
-
-The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS streams
-to parallel data outputs. The chip supports single/dual input/output modes,
-handling up to two LVDS input streams and up to two digital CMOS/TTL outputs.
-
-Single or dual operation mode, output data mapping and DDR output modes are
-configured through input signals and the chip does not expose any control bus.
-
-Required properties:
-- compatible: Shall be "thine,thc63lvd1024"
-- vcc-supply: Power supply for TTL output, TTL CLOCKOUT signal, LVDS input,
-  PPL and digital circuitry
-
-Optional properties:
-- powerdown-gpios: Power down GPIO signal, pin name "/PDWN". Active low
-- oe-gpios: Output enable GPIO signal, pin name "OE". Active high
-
-The THC63LVD1024 video port connections are modeled according
-to OF graph bindings specified by Documentation/devicetree/bindings/graph.txt
-
-Required video port nodes:
-- port@0: First LVDS input port
-- port@2: First digital CMOS/TTL parallel output
-
-Optional video port nodes:
-- port@1: Second LVDS input port
-- port@3: Second digital CMOS/TTL parallel output
-
-The device can operate in single-link mode or dual-link mode. In single-link
-mode, all pixels are received on port@0, and port@1 shall not contain any
-endpoint. In dual-link mode, even-numbered pixels are received on port@0 and
-odd-numbered pixels on port@1, and both port@0 and port@1 shall contain
-endpoints.
-
-Example:
---------
-
-       thc63lvd1024: lvds-decoder {
-               compatible = "thine,thc63lvd1024";
-
-               vcc-supply = <&reg_lvds_vcc>;
-               powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-
-                               lvds_dec_in_0: endpoint {
-                                       remote-endpoint = <&lvds_out>;
-                               };
-                       };
-
-                       port@2{
-                               reg = <2>;
-
-                               lvds_dec_out_2: endpoint {
-                                       remote-endpoint = <&adv7511_in>;
-                               };
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
new file mode 100644 (file)
index 0000000..469ac4a
--- /dev/null
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/thine,thc63lvd1024.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Thine Electronics THC63LVD1024 LVDS Decoder
+
+maintainers:
+  - Jacopo Mondi <jacopo+renesas@jmondi.org>
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description: |
+  The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS
+  streams to parallel data outputs. The chip supports single/dual input/output
+  modes, handling up to two LVDS input streams and up to two digital CMOS/TTL
+  outputs.
+
+  Single or dual operation mode, output data mapping and DDR output modes are
+  configured through input signals and the chip does not expose any control
+  bus.
+
+properties:
+  compatible:
+    const: thine,thc63lvd1024
+
+  ports:
+    type: object
+    description: |
+      This device has four video ports. Their connections are modeled using the
+      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+      The device can operate in single-link mode or dual-link mode. In
+      single-link mode, all pixels are received on port@0, and port@1 shall not
+      contain any endpoint. In dual-link mode, even-numbered pixels are
+      received on port@0 and odd-numbered pixels on port@1, and both port@0 and
+      port@1 shall contain endpoints.
+
+    properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+      port@0:
+        type: object
+        description: First LVDS input port
+
+      port@1:
+        type: object
+        description: Second LVDS input port
+
+      port@2:
+        type: object
+        description: First digital CMOS/TTL parallel output
+
+      port@3:
+        type: object
+        description: Second digital CMOS/TTL parallel output
+
+    required:
+      - port@0
+      - port@2
+
+    additionalProperties: false
+
+  oe-gpios:
+    maxItems: 1
+    description: Output enable GPIO signal, pin name "OE", active high.
+
+  powerdown-gpios:
+    maxItems: 1
+    description: Power down GPIO signal, pin name "/PDWN", active low.
+
+  vcc-supply:
+    maxItems: 1
+    description:
+      Power supply for the TTL output, TTL CLOCKOUT signal, LVDS input, PLL and
+      digital circuitry.
+
+required:
+  - compatible
+  - ports
+  - vcc-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    lvds-decoder {
+        compatible = "thine,thc63lvd1024";
+
+        vcc-supply = <&reg_lvds_vcc>;
+        powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                lvds_dec_in_0: endpoint {
+                    remote-endpoint = <&lvds_out>;
+                };
+            };
+
+            port@2 {
+                reg = <2>;
+
+                lvds_dec_out_2: endpoint {
+                    remote-endpoint = <&adv7511_in>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt
deleted file mode 100644 (file)
index df3d7c1..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-THS8134 and THS8135 Video DAC
------------------------------
-
-This is the binding for Texas Instruments THS8134, THS8134A, THS8134B and
-THS8135 Video DAC bridges.
-
-Required properties:
-
-- compatible: Must be one of
-  "ti,ths8134"
-  "ti,ths8134a," "ti,ths8134"
-  "ti,ths8134b", "ti,ths8134"
-  "ti,ths8135"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Example
--------
-
-vga-bridge {
-       compatible = "ti,ths8135";
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       ports {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               port@0 {
-                       reg = <0>;
-
-                       vga_bridge_in: endpoint {
-                               remote-endpoint = <&lcdc_out_vga>;
-                       };
-               };
-
-               port@1 {
-                       reg = <1>;
-
-                       vga_bridge_out: endpoint {
-                               remote-endpoint = <&vga_con_in>;
-                       };
-               };
-       };
-};
index fd986c36c7373889a5da1494bb0ffbb922fc5e23..85b71b1fd28a28b4bc32e45ec49df508e3efdcc2 100644 (file)
@@ -28,7 +28,7 @@ description: |
 
 properties:
   $nodename:
-    pattern: "^dsi-controller(@.*)?$"
+    pattern: "^dsi(@.*)?$"
 
   "#address-cells":
     const: 1
@@ -76,7 +76,7 @@ patternProperties:
 examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
-    dsi-controller@a0351000 {
+    dsi@a0351000 {
         reg = <0xa0351000 0x1000>;
         #address-cells = <1>;
         #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt b/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt
deleted file mode 100644 (file)
index 0601a9e..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-ARM Versatile TFT Panels
-
-These panels are connected to the daughterboards found on the
-ARM Versatile reference designs.
-
-This device node must appear as a child to a "syscon"-compatible
-node.
-
-Required properties:
-- compatible: should be "arm,versatile-tft-panel"
-
-Required subnodes:
-- port: see display/panel/panel-common.yaml, graph.txt
-
-
-Example:
-
-sysreg@0 {
-       compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
-       reg = <0x00000 0x1000>;
-
-       panel: display@0 {
-               compatible = "arm,versatile-tft-panel";
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&foo>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml b/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
new file mode 100644 (file)
index 0000000..41fd571
--- /dev/null
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/arm,versatile-tft-panel.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile TFT Panels
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  These panels are connected to the daughterboards found on the
+  ARM Versatile reference designs.
+
+  This device node must appear as a child to a "syscon"-compatible
+  node.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: arm,versatile-tft-panel
+
+  port: true
+
+required:
+  - compatible
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    sysreg {
+        compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
+        reg = <0x00000 0x1000>;
+
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel {
+            compatible = "arm,versatile-tft-panel";
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&foo>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml b/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
new file mode 100644 (file)
index 0000000..083d2b9
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/asus,z00t-tm5p5-nt35596.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASUS Z00T TM5P5 NT35596 5.5" 1080×1920 LCD Panel
+
+maintainers:
+  - Konrad Dybcio <konradybcio@gmail.com>
+
+description: |+
+  This panel seems to only be found in the Asus Z00T
+  smartphone and we have no straightforward way of
+  actually getting the correct model number,
+  as no schematics are released publicly.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: asus,z00t-tm5p5-n35596
+  reg: true
+  reset-gpios: true
+  vdd-supply:
+     description: core voltage supply
+  vddio-supply:
+     description: vddio supply
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+  - vddio-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            panel@0 {
+                    reg = <0>;
+
+                    compatible = "asus,z00t-tm5p5-n35596";
+
+                    vdd-supply = <&pm8916_l8>;
+                    vddio-supply = <&pm8916_l6>;
+                    reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+            };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
deleted file mode 100644 (file)
index 3caea21..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Boe Himax8279d 1200x1920 TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,himax8279d8p" and one of: "boe,himax8279d10p"
-- reg: DSI virtual channel of the peripheral
-- enable-gpios: panel enable gpio
-- pp33-gpios: a GPIO phandle for the 3.3v pin that provides the supply voltage
-- pp18-gpios: a GPIO phandle for the 1.8v pin that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel {
-                       compatible = "boe,himax8279d8p", "boe,himax8279d10p";
-                       reg = <0>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
-                       pp33-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
-                       pp18-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
new file mode 100644 (file)
index 0000000..272a3a0
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,himax8279d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Boe Himax8279d 1200x1920 TFT LCD panel
+
+maintainers:
+  - Jerry Han <jerry.han.hq@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: boe,himax8279d8p
+      - const: boe,himax8279d10p
+
+  backlight: true
+  enable-gpios: true
+  reg: true
+
+  pp33-gpios:
+    maxItems: 1
+    description: GPIO for the 3.3v pin that provides the supply voltage
+
+  pp18-gpios:
+    maxItems: 1
+    description: GPIO for the 1.8v pin that provides the supply voltage
+
+required:
+  - compatible
+  - reg
+  - enable-gpios
+  - pp33-gpios
+  - pp18-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "boe,himax8279d8p", "boe,himax8279d10p";
+            reg = <0>;
+            backlight = <&backlight>;
+            enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+            pp33-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
+            pp18-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
index 74021345913428f3d67ef4f2c1598799cba796c7..7f5df585101731a4edd7869849ea4125802ba437 100644 (file)
@@ -24,6 +24,8 @@ properties:
       - boe,tv101wum-n53
         # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel
       - auo,b101uan08.3
+        # BOE TV105WUM-NW0 10.5" WUXGA TFT LCD panel
+      - boe,tv105wum-nw0
 
   reg:
     description: the virtual channel number of a DSI peripheral
index c8c0c9cb0492bffe714252277dd02c8dc9caef06..56903ded005e5c6206a96c7ea37cb8bd8664e390 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/display/panel/display-timings.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: display timing bindings
+title: display timings bindings
 
 maintainers:
   - Thierry Reding <thierry.reding@gmail.com>
@@ -14,7 +14,7 @@ maintainers:
 description: |
   A display panel may be able to handle several display timings,
   with different resolutions.
-  The display-timings node makes it possible to specify the timing
+  The display-timings node makes it possible to specify the timings
   and to specify the timing that is native for the display.
 
 properties:
@@ -25,8 +25,8 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: |
       The default display timing is the one specified as native-mode.
-      If no native-mode is specified then the first node is assumed the
-      native mode.
+      If no native-mode is specified then the first node is assumed
+      to be the native mode.
 
 patternProperties:
   "^timing":
diff --git a/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
deleted file mode 100644 (file)
index 82caa7b..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel
-
-Required properties:
-- compatible: must be "feiyang,fy07024di26a30d"
-- reg: DSI virtual channel used by that screen
-- avdd-supply: analog regulator dc1 switch
-- dvdd-supply: 3v3 digital regulator
-- reset-gpios: a GPIO phandle for the reset pin
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-panel@0 {
-       compatible = "feiyang,fy07024di26a30d";
-       reg = <0>;
-       avdd-supply = <&reg_dc1sw>;
-       dvdd-supply = <&reg_dldo2>;
-       reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
-       backlight = <&backlight>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
new file mode 100644 (file)
index 0000000..95acf9e
--- /dev/null
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/feiyang,fy07024di26a30d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel
+
+maintainers:
+  - Jagan Teki <jagan@amarulasolutions.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: feiyang,fy07024di26a30d
+
+  reg:
+    description: DSI virtual channel used by that screen
+    maxItems: 1
+
+  avdd-supply:
+    description: analog regulator dc1 switch
+
+  dvdd-supply:
+    description: 3v3 digital regulator
+
+  reset-gpios: true
+
+  backlight: true
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - dvdd-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "feiyang,fy07024di26a30d";
+            reg = <0>;
+            avdd-supply = <&reg_dc1sw>;
+            dvdd-supply = <&reg_dldo2>;
+            reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+            backlight = <&backlight>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt
deleted file mode 100644 (file)
index 3d5ce6a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-Ilitek ILI9322 TFT panel driver with SPI control bus
-
-This is a driver for 320x240 TFT panels, accepting a variety of input
-streams that get adapted and scaled to the panel. The panel output has
-960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
-VCOMH outputs.
-
-Required properties:
-  - compatible: "dlink,dir-685-panel", "ilitek,ili9322"
-    (full system-specific compatible is always required to look up configuration)
-  - reg: address of the panel on the SPI bus
-
-Optional properties:
-  - vcc-supply: core voltage supply, see regulator/regulator.txt
-  - iovcc-supply: voltage supply for the interface input/output signals,
-    see regulator/regulator.txt
-  - vci-supply: voltage supply for analog parts, see regulator/regulator.txt
-  - reset-gpios: a GPIO spec for the reset pin, see gpio/gpio.txt
-
-  The following optional properties only apply to RGB and YUV input modes and
-  can be omitted for BT.656 input modes:
-
-  - pixelclk-active: see display/panel/display-timing.txt
-  - de-active: see display/panel/display-timing.txt
-  - hsync-active: see display/panel/display-timing.txt
-  - vsync-active: see display/panel/display-timing.txt
-
-The panel must obey the rules for a SPI slave device as specified in
-spi/spi-bus.txt
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in
-media/video-interfaces.txt. This node should describe panel's video bus.
-
-Example:
-
-panel: display@0 {
-       compatible = "dlink,dir-685-panel", "ilitek,ili9322";
-       reg = <0>;
-       vcc-supply = <&vdisp>;
-       iovcc-supply = <&vdisp>;
-       vci-supply = <&vdisp>;
-
-       port {
-               panel_in: endpoint {
-                       remote-endpoint = <&display_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
new file mode 100644 (file)
index 0000000..177d48c
--- /dev/null
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ilitek,ili9322.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9322 TFT panel driver with SPI control bus
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This is a driver for 320x240 TFT panels, accepting a variety of input
+  streams that get adapted and scaled to the panel. The panel output has
+  960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
+  VCOMH outputs.
+
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - dlink,dir-685-panel
+
+      - const: ilitek,ili9322
+
+  reset-gpios: true
+  port: true
+
+  vcc-supply:
+    description: Core voltage supply
+
+  iovcc-supply:
+    description: Voltage supply for the interface input/output signals
+
+  vci-supply:
+    description: Voltage supply for analog parts
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: display@0 {
+            compatible = "dlink,dir-685-panel", "ilitek,ili9322";
+            reg = <0>;
+            vcc-supply = <&vdisp>;
+            iovcc-supply = <&vdisp>;
+            vci-supply = <&vdisp>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&display_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
deleted file mode 100644 (file)
index 4a041ac..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Ilitek ILI9881c based MIPI-DSI panels
-
-Required properties:
-  - compatible: must be "ilitek,ili9881c" and one of:
-    * "bananapi,lhr050h41"
-  - reg: DSI virtual channel used by that screen
-  - power-supply: phandle to the power regulator
-  - reset-gpios: a GPIO phandle for the reset pin
-
-Optional properties:
-  - backlight: phandle to the backlight used
-
-Example:
-panel@0 {
-       compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
-       reg = <0>;
-       power-supply = <&reg_display>;
-       reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
-       backlight = <&pwm_bl>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
new file mode 100644 (file)
index 0000000..a393322
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ilitek,ili9881c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9881c based MIPI-DSI panels
+
+maintainers:
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - bananapi,lhr050h41
+
+      - const: ilitek,ili9881c
+
+  backlight: true
+  power-supply: true
+  reg: true
+  reset-gpios: true
+
+required:
+  - compatible
+  - power-supply
+  - reg
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+            reg = <0>;
+            power-supply = <&reg_display>;
+            reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+            backlight = <&pwm_bl>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
deleted file mode 100644 (file)
index d1cab3a..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,p097pfg"
-- reg: DSI virtual channel of the peripheral
-- avdd-supply: phandle of the regulator that provides positive voltage
-- avee-supply: phandle of the regulator that provides negative voltage
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel@0 {
-                       compatible = "innolux,p079zca";
-                       reg = <0>;
-                       avdd-supply = <...>;
-                       avee-supply = <...>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
new file mode 100644 (file)
index 0000000..5a5f071
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/innolux,p097pfg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
+
+maintainers:
+  - Lin Huang <hl@rock-chips.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: innolux,p097pfg
+
+  backlight: true
+  enable-gpios: true
+  reg: true
+
+  avdd-supply:
+    description: The regulator that provides positive voltage
+
+  avee-supply:
+    description: The regulator that provides negative voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - avee-supply
+  - enable-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "innolux,p097pfg";
+            reg = <0>;
+            avdd-supply = <&avdd>;
+            avee-supply = <&avee>;
+            backlight = <&backlight>;
+            enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt
deleted file mode 100644 (file)
index 513f034..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "innolux,p120zdg-bf1"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- no-hpd: If HPD isn't hooked up; add this property.
-
-Example:
-       panel_edp: panel-edp {
-               compatible = "innolux,p120zdg-bf1";
-               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
-               power-supply = <&pm8916_l2>;
-               backlight = <&backlight>;
-               no-hpd;
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml
new file mode 100644 (file)
index 0000000..243dac2
--- /dev/null
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/innolux,p120zdg-bf1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
+
+maintainers:
+  - Sandeep Panda <spanda@codeaurora.org>
+  - Douglas Anderson <dianders@chromium.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: innolux,p120zdg-bf1
+
+  enable-gpios: true
+  power-supply: true
+  backlight: true
+  no-hpd: true
+
+required:
+  - compatible
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    panel_edp: panel-edp {
+        compatible = "innolux,p120zdg-bf1";
+        enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+        power-supply = <&pm8916_l2>;
+        backlight = <&backlight>;
+        no-hpd;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt
deleted file mode 100644 (file)
index 4989c91..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-JDI model LT070ME05000 1200x1920 7" DSI Panel
-
-Required properties:
-- compatible: should be "jdi,lt070me05000"
-- vddp-supply: phandle of the regulator that provides the supply voltage
-  Power IC supply (3-5V)
-- iovcc-supply: phandle of the regulator that provides the supply voltage
-  IOVCC , power supply for LCM (1.8V)
-- enable-gpios: phandle of gpio for enable line
-  LED_EN, LED backlight enable, High active
-- reset-gpios: phandle of gpio for reset line
-  This should be 8mA, gpio can be configured using mux, pinctrl, pinctrl-names
-  XRES, Reset, Low active
-- dcdc-en-gpios: phandle of the gpio for power ic line
-  Power IC supply enable, High active
-
-Example:
-
-       dsi0: qcom,mdss_dsi@4700000 {
-               panel@0 {
-                       compatible = "jdi,lt070me05000";
-                       reg = <0>;
-
-                       vddp-supply = <&pm8921_l17>;
-                       iovcc-supply = <&pm8921_lvs7>;
-
-                       enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
-                       reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
-                       dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
new file mode 100644 (file)
index 0000000..b8b9435
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/jdi,lt070me05000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: JDI model LT070ME05000 1200x1920 7" DSI Panel
+
+maintainers:
+  - Vinay Simha BN <simhavcs@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: jdi,lt070me05000
+
+  enable-gpios: true
+  reg: true
+  reset-gpios: true
+
+  vddp-supply:
+    description: |
+      The regulator that provides the supply voltage Power IC supply (3-5V)
+
+  iovcc-supply:
+    description: |
+      The regulator that provides the supply voltage IOVCC,
+      power supply for LCM (1.8V)
+
+  dcdc-en-gpios:
+    description: |
+      phandle of the gpio for power ic line
+      Power IC supply enable, High active
+
+required:
+  - compatible
+  - reg
+  - vddp-supply
+  - iovcc-supply
+  - enable-gpios
+  - reset-gpios
+  - dcdc-en-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "jdi,lt070me05000";
+            reg = <0>;
+
+            vddp-supply = <&pm8921_l17>;
+            iovcc-supply = <&pm8921_lvs7>;
+
+            enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+            reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
+            dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt
deleted file mode 100644 (file)
index fa95960..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-King Display KD035G6-54NT 3.5" (320x240 pixels) 24-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "kingdisplay,kd035g6-54nt"
-- power-supply: See panel-common.txt
-- reset-gpios: See panel-common.txt
-
-Optional properties:
-- backlight: see panel-common.txt
-
-The generic bindings for the SPI slaves documented in [1] also apply.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/graph.txt
-
-Example:
-
-&spi {
-       panel@0 {
-               compatible = "kingdisplay,kd035g6-54nt";
-               reg = <0>;
-
-               spi-max-frequency = <3125000>;
-               spi-3wire;
-               spi-cs-high;
-
-               reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
-
-               backlight = <&backlight>;
-               power-supply = <&ldo6>;
-
-               port {
-                       panel_input: endpoint {
-                               remote-endpoint = <&panel_output>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
new file mode 100644 (file)
index 0000000..6960036
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/kingdisplay,kd035g6-54nt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: King Display KD035G6-54NT 3.5" (320x240 pixels) 24-bit TFT LCD panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: kingdisplay,kd035g6-54nt
+
+  backlight: true
+  port: true
+  power-supply: true
+  reg: true
+  reset-gpios: true
+
+required:
+  - compatible
+  - power-supply
+  - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "kingdisplay,kd035g6-54nt";
+            reg = <0>;
+
+            spi-max-frequency = <3125000>;
+            spi-3wire;
+            spi-cs-high;
+
+            reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
+
+            backlight = <&backlight>;
+            power-supply = <&ldo6>;
+
+            port {
+                panel_input: endpoint {
+                    remote-endpoint = <&panel_output>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
deleted file mode 100644 (file)
index cfefff6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
-
-Required properties:
-- compatible: should be "kingdisplay,kd097d04"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel@0 {
-                       compatible = "kingdisplay,kd097d04";
-                       reg = <0>;
-                       power-supply = <...>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
new file mode 100644 (file)
index 0000000..a372bdc
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/leadtek,ltk050h3146w.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Leadtek LTK050H3146W 5.0in 720x1280 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+      - leadtek,ltk050h3146w
+      - leadtek,ltk050h3146w-a2
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+     description: regulator that supplies the iovcc voltage
+  vci-supply:
+     description: regulator that supplies the vci voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vci-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "leadtek,ltk050h3146w";
+            reg = <0>;
+            backlight = <&backlight>;
+            iovcc-supply = <&vcc_1v8>;
+            vci-supply = <&vcc3v3_lcd>;
+        };
+    };
+
+...
index fd931b29381670bcf271f926b9543522f06d260c..b900973b5f7b7622fba9ccd3f4b5720a61e159cf 100644 (file)
@@ -37,7 +37,6 @@ examples:
     dsi {
         #address-cells = <1>;
         #size-cells = <0>;
-        reg = <0xff450000 0x1000>;
 
         panel@0 {
             compatible = "leadtek,ltk500hd1829";
diff --git a/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt b/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt
deleted file mode 100644 (file)
index fc1e1b3..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
-
-Required properties:
-- compatible: must be "lg,acx467akm-7"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt b/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt
deleted file mode 100644 (file)
index 5e649cb..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG Corporation 7" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,ld070wx3-sl01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/display/panel/lg,lg4573.txt
deleted file mode 100644 (file)
index 824441f..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-LG LG4573 TFT Liquid Crystal Display with SPI control bus
-
-Required properties:
-  - compatible: "lg,lg4573"
-  - reg: address of the panel on the SPI bus
-
-The panel must obey rules for SPI slave device specified in document [1].
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-
-       lcd_panel: display@0 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "lg,lg4573";
-               spi-max-frequency = <10000000>;
-               reg = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
new file mode 100644 (file)
index 0000000..b4314ce
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lg,lg4573.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Heiko Schocher <hs@denx.de>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: lg,lg4573
+
+  reg: true
+  spi-max-frequency: true
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        lcd_panel: display@0 {
+            compatible = "lg,lg4573";
+            spi-max-frequency = <10000000>;
+            reg = <0>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt b/Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt
deleted file mode 100644 (file)
index a04fd2b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG Corporation 5" HD TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lh500wx1-sd03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt
deleted file mode 100644 (file)
index 1a1e653..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-LG.Philips LB035Q02 Panel
-=========================
-
-Required properties:
-- compatible: "lgphilips,lb035q02"
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- label: a symbolic name for the panel
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: panel@0 {
-       compatible = "lgphilips,lb035q02";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-
-       enable-gpios = <&gpio7 7 0>;
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
new file mode 100644 (file)
index 0000000..830e335
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lgphilips,lb035q02.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG.Philips LB035Q02 Panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: lgphilips,lb035q02
+
+  label: true
+  enable-gpios: true
+  port: true
+
+required:
+  - compatible
+  - enable-gpios
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: panel@0 {
+            compatible = "lgphilips,lb035q02";
+            reg = <0>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            label = "lcd";
+
+            enable-gpios = <&gpio7 7 0>;
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
+
+...
index d0083301acbefbe78a1d528a413ae2f2a24f73a2..946dd354256c11d2e461f4810e8b094ef08117f3 100644 (file)
@@ -96,12 +96,20 @@ properties:
       If set, reverse the bit order described in the data mappings below on all
       data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6.
 
+  port: true
+  ports: true
+
 required:
   - compatible
   - data-mapping
   - width-mm
   - height-mm
   - panel-timing
-  - port
+
+oneOf:
+  - required:
+      - port
+  - required:
+      - ports
 
 ...
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
deleted file mode 100644 (file)
index a89f9c8..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
-
-This device can be used as bridge between a host controller and LCD panels.
-Currently supported LCDs are:
-  - LCD-OLinuXino-4.3TS
-  - LCD-OLinuXino-5
-  - LCD-OLinuXino-7
-  - LCD-OLinuXino-10
-
-The panel itself contains:
-  - AT24C16C EEPROM holding panel identification and timing requirements
-  - AR1021 resistive touch screen controller (optional)
-  - FT5x6 capacitive touch screnn controller (optional)
-  - GT911/GT928 capacitive touch screen controller (optional)
-
-The above chips share same I2C bus. The EEPROM is factory preprogrammed with
-device information (id, serial, etc.) and timing requirements.
-
-Touchscreen bingings can be found in these files:
-  - input/touchscreen/goodix.txt
-  - input/touchscreen/edt-ft5x06.txt
-  - input/touchscreen/ar1021.txt
-
-Required properties:
-  - compatible: should be "olimex,lcd-olinuxino"
-  - reg: address of the configuration EEPROM, should be <0x50>
-  - power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-  - enable-gpios: GPIO pin to enable or disable the panel
-  - backlight: phandle of the backlight device attacked to the panel
-
-Example:
-&i2c2 {
-       panel@50 {
-               compatible = "olimex,lcd-olinuxino";
-               reg = <0x50>;
-               power-supply = <&reg_vcc5v0>;
-               enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
-               backlight = <&backlight>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
new file mode 100644 (file)
index 0000000..2329d96
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/olimex,lcd-olinuxino.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
+
+maintainers:
+  - Stefan Mavrodiev <stefan@olimex.com>
+
+description: |
+  This device can be used as bridge between a host controller and LCD panels.
+  Currently supported LCDs are:
+    - LCD-OLinuXino-4.3TS
+    - LCD-OLinuXino-5
+    - LCD-OLinuXino-7
+    - LCD-OLinuXino-10
+
+  The panel itself contains:
+    - AT24C16C EEPROM holding panel identification and timing requirements
+    - AR1021 resistive touch screen controller (optional)
+  - FT5x6 capacitive touch screen controller (optional)
+    - GT911/GT928 capacitive touch screen controller (optional)
+
+  The above chips share same I2C bus. The EEPROM is factory preprogrammed with
+  device information (id, serial, etc.) and timing requirements.
+
+  Touchscreen bindings can be found in these files:
+    - input/touchscreen/goodix.yaml
+    - input/touchscreen/edt-ft5x06.txt
+    - input/touchscreen/ar1021.txt
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: olimex,lcd-olinuxino
+
+  backlight: true
+  enable-gpios: true
+  power-supply: true
+  reg: true
+
+required:
+  - compatible
+  - reg
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@50 {
+            compatible = "olimex,lcd-olinuxino";
+            reg = <0x50>;
+            power-supply = <&reg_vcc5v0>;
+            enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
+            backlight = <&backlight>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt
deleted file mode 100644 (file)
index 9d88e96..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
-
-The panel is similar to OSD101T2045-53TS, but it needs additional
-MIPI_DSI_TURN_ON_PERIPHERAL message from the host.
-
-Required properties:
-- compatible: should be "osddisplays,osd101t2587-53ts"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
index ed051ba12084b786fb0e3a767c46e8499f178f00..a747b755ad0609a1af45780ad626f4afccd290b1 100644 (file)
@@ -63,9 +63,9 @@ properties:
 
   display-timings:
     description:
-      Some display panels supports several resolutions with different timing.
+      Some display panels support several resolutions with different timings.
       The display-timings bindings supports specifying several timings and
-      optional specify which is the native mode.
+      optionally specifying which is the native mode.
     allOf:
       - $ref: display-timings.yaml#
 
@@ -96,6 +96,12 @@ properties:
       (hot plug detect) signal, but the signal isn't hooked up so we should
       hardcode the max delay from the panel spec when powering up the panel.
 
+  hpd-gpios:
+    maxItems: 1
+    description:
+      If Hot Plug Detect (HPD) is connected to a GPIO in the system rather
+      than a dedicated HPD pin, the pin can be specified here.
+
   # Control I/Os
 
   # Many display panels can be controlled through pins driven by GPIOs. The nature
@@ -124,6 +130,13 @@ properties:
       while active. Active high reset signals can be supported by inverting the
       GPIO specifier polarity flag.
 
+  te-gpios:
+    maxItems: 1
+    description:
+      GPIO spec for the tearing effect synchronization signal.
+      The tearing effect signal is active high. Active low signals can be
+      supported by inverting the GPIO specifier polarity flag.
+
   # Power
   power-supply:
     description:
index b2e8742fd6af8cc9f099517f4b7abdd54da5c306..16778ce782fc2bde3bef74bfddc741281eeee159 100644 (file)
@@ -29,6 +29,20 @@ properties:
       # compatible must be listed in alphabetical order, ordered by compatible.
       # The description in the comment is mandatory for each compatible.
 
+        # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+      - auo,b080uan01
+        # Boe Corporation 8.0" WUXGA TFT LCD panel
+      - boe,tv080wum-nl0
+        # Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
+      - kingdisplay,kd097d04
+        # LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
+      - lg,acx467akm-7
+        # LG Corporation 7" WXGA TFT LCD panel
+      - lg,ld070wx3-sl01
+        # One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
+      - osddisplays,osd101t2587-53ts
+        # Panasonic 10" WUXGA TFT LCD panel
+      - panasonic,vvx10f004b00
         # Panasonic 10" WUXGA TFT LCD panel
       - panasonic,vvx10f034n00
 
index 393ffc6acbba1cae8a7fd596debf3209ac3b5409..d6cca1479633ab01b015b6be26b897ed87a9f2ab 100644 (file)
@@ -33,8 +33,6 @@ properties:
       - ampire,am-480272h3tmqw-t01h
         # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
       - ampire,am800480r3tmqwa1h
-        # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
-      - auo,b080uan01
         # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
       - auo,b101aw03
         # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
@@ -55,10 +53,16 @@ properties:
       - auo,g101evn010
         # AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
       - auo,g104sn02
+        # AU Optronics Corporation 12.1" (1280x800) TFT LCD panel
+      - auo,g121ean01
         # AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
       - auo,g133han01
+        # AU Optronics Corporation 15.6" (1366x768) TFT LCD panel
+      - auo,g156xtn01
         # AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
       - auo,g185han01
+        # AU Optronics Corporation 19.0" (1280x1024) TFT LCD panel
+      - auo,g190ean01
         # AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
       - auo,p320hvn03
         # AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
@@ -69,10 +73,12 @@ properties:
       - boe,hv070wsa-100
         # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
       - boe,nv101wxmn51
+        # BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
+      - boe,nv133fhm-n61
+        # BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
+      - boe,nv133fhm-n62
         # BOE NV140FHM-N49 14.0" FHD a-Si FT panel
       - boe,nv140fhmn49
-        # Boe Corporation 8.0" WUXGA TFT LCD panel
-      - boe,tv080wum-nl0
         # CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
       - cdtech,s043wq26h-ct7
         # CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
@@ -82,6 +88,8 @@ properties:
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
       - chunghwa,claa101wa01a
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+      - chunghwa,claa101wb01
+        # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
       - chunghwa,claa101wb03
         # DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
       - dataimage,scf0700c48ggu18
@@ -127,6 +135,8 @@ properties:
       - hannstar,hsd100pxn1
         # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
       - hit,tx23d38vm0caa
+        # InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
+      - ivo,m133nwf4-r0
         # Innolux AT043TN24 4.3" WQVGA TFT LCD panel
       - innolux,at043tn24
         # Innolux AT070TN92 7.0" WQVGA TFT LCD panel
@@ -155,6 +165,8 @@ properties:
       - lemaker,bl035-rgb-002
         # LG 7" (800x480 pixels) TFT LCD panel
       - lg,lb070wv8
+        # LG Corporation 5" HD TFT LCD panel
+      - lg,lh500wx1-sd03
         # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
       - lg,lp079qx1-sp0v
         # LG 9.7" (2048x1536 pixels) TFT LCD panel
@@ -227,6 +239,8 @@ properties:
       - sharp,ls020b1dd01d
         # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
       - shelly,sca07010-bfn-lnn
+        # Starry KR070PE2T 7" WVGA TFT LCD panel
+      - starry,kr070pe2t
         # Starry 12.2" (1920x1200 pixels) TFT LCD panel
       - starry,kr122ea0sra
         # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
deleted file mode 100644 (file)
index 1042469..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Raydium RM67171 OLED LCD panel with MIPI-DSI protocol
-
-Required properties:
-- compatible:          "raydium,rm67191"
-- reg:                 virtual channel for MIPI-DSI protocol
-                       must be <0>
-- dsi-lanes:           number of DSI lanes to be used
-                       must be <3> or <4>
-- port:                input port node with endpoint definition as
-                       defined in Documentation/devicetree/bindings/graph.txt;
-                       the input port should be connected to a MIPI-DSI device
-                       driver
-
-Optional properties:
-- reset-gpios:         a GPIO spec for the RST_B GPIO pin
-- v3p3-supply:         phandle to 3.3V regulator that powers the VDD_3V3 pin
-- v1p8-supply:         phandle to 1.8V regulator that powers the VDD_1V8 pin
-- width-mm:            see panel-common.txt
-- height-mm:           see panel-common.txt
-- video-mode:          0 - burst-mode
-                       1 - non-burst with sync event
-                       2 - non-burst with sync pulse
-
-Example:
-
-       panel@0 {
-               compatible = "raydium,rm67191";
-               reg = <0>;
-               pinctrl-0 = <&pinctrl_mipi_dsi_0_1_en>;
-               pinctrl-names = "default";
-               reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
-               dsi-lanes = <4>;
-               width-mm = <68>;
-               height-mm = <121>;
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&mipi_out>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
new file mode 100644 (file)
index 0000000..745dd24
--- /dev/null
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm67191.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium RM67171 OLED LCD panel with MIPI-DSI protocol
+
+maintainers:
+  - Robert Chiras <robert.chiras@nxp.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: raydium,rm67191
+
+  reg: true
+  port: true
+  reset-gpios: true
+  width-mm: true
+  height-mm: true
+
+  dsi-lanes:
+    description: Number of DSI lanes to be used; must be <3> or <4>
+    enum: [3, 4]
+
+  v3p3-supply:
+    description: phandle to 3.3V regulator that powers the VDD_3V3 pin
+
+  v1p8-supply:
+    description: phandle to 1.8V regulator that powers the VDD_1V8 pin
+
+  video-mode:
+    description: |
+      0 - burst-mode
+      1 - non-burst with sync event
+      2 - non-burst with sync pulse
+    enum: [0, 1, 2]
+
+required:
+  - compatible
+  - reg
+  - dsi-lanes
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "raydium,rm67191";
+            reg = <0>;
+            reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+            dsi-lanes = <4>;
+            width-mm = <68>;
+            height-mm = <121>;
+            video-mode = <1>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&mipi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml
new file mode 100644 (file)
index 0000000..96bdde9
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,amoled-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMOLED MIPI-DSI panels
+
+maintainers:
+  - Hoegeun Kwon <hoegeun.kwon@samsung.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+        # Samsung S6E63J0X03 1.63" 320x320 AMOLED panel
+      - samsung,s6e63j0x03
+        # Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
+      - samsung,s6e3ha2
+        # Samsung S6E3HF2 5.65" 1600x2560 AMOLED panel
+      - samsung,s6e3hf2
+
+  reg: true
+  reset-gpios: true
+  enable-gpios: true
+  te-gpios: true
+
+  vdd3-supply:
+    description: I/O voltage supply
+
+  vci-supply:
+    description: voltage supply for analog circuits
+
+required:
+  - compatible
+  - reg
+  - vdd3-supply
+  - vci-supply
+  - reset-gpios
+  - enable-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "samsung,s6e3ha2";
+            reg = <0>;
+            vdd3-supply = <&ldo27_reg>;
+            vci-supply = <&ldo28_reg>;
+            reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+            enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+            te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt
deleted file mode 100644 (file)
index 354d4d1..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-Samsung LD9040 AMOLED LCD parallel RGB panel with SPI control bus
-
-Required properties:
-  - compatible: "samsung,ld9040"
-  - reg: address of the panel on SPI bus
-  - vdd3-supply: core voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin
-  - display-timings: timings for the connected panel according to [1]
-
-The panel must obey rules for SPI slave device specified in document [2].
-
-Optional properties:
-  - power-on-delay: delay after turning regulators on [ms]
-  - reset-delay: delay after reset sequence [ms]
-  - panel-width-mm: physical panel width [mm]
-  - panel-height-mm: physical panel height [mm]
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [3]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/display/panel/display-timing.txt
-[2]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[3]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
-       lcd@0 {
-               compatible = "samsung,ld9040";
-               reg = <0>;
-               vdd3-supply = <&ldo7_reg>;
-               vci-supply = <&ldo17_reg>;
-               reset-gpios = <&gpy4 5 0>;
-               spi-max-frequency = <1200000>;
-               spi-cpol;
-               spi-cpha;
-               power-on-delay = <10>;
-               reset-delay = <10>;
-               panel-width-mm = <90>;
-               panel-height-mm = <154>;
-
-               display-timings {
-                       timing {
-                               clock-frequency = <23492370>;
-                               hactive = <480>;
-                               vactive = <800>;
-                               hback-porch = <16>;
-                               hfront-porch = <16>;
-                               vback-porch = <2>;
-                               vfront-porch = <28>;
-                               hsync-len = <2>;
-                               vsync-len = <1>;
-                               hsync-active = <0>;
-                               vsync-active = <0>;
-                               de-active = <0>;
-                               pixelclk-active = <0>;
-                       };
-               };
-
-               port {
-                       lcd_ep: endpoint {
-                               remote-endpoint = <&fimd_dpi_ep>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
new file mode 100644 (file)
index 0000000..060ee27
--- /dev/null
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,ld9040.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung LD9040 AMOLED LCD parallel RGB panel with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Andrzej Hajda <a.hajda@samsung.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,ld9040
+
+  display-timings: true
+  port: true
+  reg: true
+  reset-gpios: true
+
+  vdd3-supply:
+    description: core voltage supply
+
+  vci-supply:
+    description: voltage supply for analog circuits
+
+  power-on-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: delay after turning regulators on [ms]
+
+  reset-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: delay after reset sequence [ms]
+
+  panel-width-mm:
+    description: physical panel width [mm]
+
+  panel-height-mm:
+    description: physical panel height [mm]
+
+required:
+  - compatible
+  - reg
+  - vdd3-supply
+  - vci-supply
+  - reset-gpios
+  - display-timings
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        lcd@0 {
+            compatible = "samsung,ld9040";
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            reg = <0>;
+            vdd3-supply = <&ldo7_reg>;
+            vci-supply = <&ldo17_reg>;
+            reset-gpios = <&gpy4 5 0>;
+            spi-max-frequency = <1200000>;
+            spi-cpol;
+            spi-cpha;
+            power-on-delay = <10>;
+            reset-delay = <10>;
+            panel-width-mm = <90>;
+            panel-height-mm = <154>;
+
+            display-timings {
+                timing {
+                    clock-frequency = <23492370>;
+                    hactive = <480>;
+                    vactive = <800>;
+                    hback-porch = <16>;
+                    hfront-porch = <16>;
+                    vback-porch = <2>;
+                    vfront-porch = <28>;
+                    hsync-len = <2>;
+                    vsync-len = <1>;
+                    hsync-active = <0>;
+                    vsync-active = <0>;
+                    de-active = <0>;
+                    pixelclk-active = <0>;
+                };
+            };
+
+            port {
+                lcd_ep: endpoint {
+                    remote-endpoint = <&fimd_dpi_ep>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
deleted file mode 100644 (file)
index b94e366..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Samsung S6D16D0 4" 864x480 AMOLED panel
-
-Required properties:
-  - compatible: should be:
-    "samsung,s6d16d0",
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd1-supply: I/O voltage supply
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in
-media/video-interfaces.txt. This node should describe panel's video bus.
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6d16d0";
-               reg = <0>;
-               vdd1-supply = <&foo>;
-               reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&dsi_out>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml
new file mode 100644 (file)
index 0000000..66d1474
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6d16d0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S6D16D0 4" 864x480 AMOLED panel
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6d16d0
+
+  port: true
+  reg: true
+  reset-gpios: true
+
+  vdd1-supply:
+    description: I/O voltage supply
+
+required:
+  - compatible
+  - reg
+  - vdd1-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "samsung,s6d16d0";
+            reg = <0>;
+            vdd1-supply = <&foo>;
+            reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
deleted file mode 100644 (file)
index 4acea25..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
-Samsung S6E3HF2 5.65" 1600x2560 AMOLED panel
-
-Required properties:
-  - compatible: should be one of:
-    "samsung,s6e3ha2",
-    "samsung,s6e3hf2".
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd3-supply: I/O voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-  - enable-gpios: a GPIO spec for the panel enable pin (active high)
-
-Optional properties:
-  - te-gpios: a GPIO spec for the tearing effect synchronization signal
-    gpio pin (active high)
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6e3ha2";
-               reg = <0>;
-               vdd3-supply = <&ldo27_reg>;
-               vci-supply = <&ldo28_reg>;
-               reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
-               enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
-               te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt
deleted file mode 100644 (file)
index 3f1a839..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Samsung S6E63J0X03 1.63" 320x320 AMOLED panel (interface: MIPI-DSI command mode)
-
-Required properties:
-  - compatible: "samsung,s6e63j0x03"
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd3-supply: I/O voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-  - te-gpios: a GPIO spec for the tearing effect synchronization signal
-    gpio pin (active high)
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6e63j0x03";
-               reg = <0>;
-               vdd3-supply = <&ldo16_reg>;
-               vci-supply = <&ldo20_reg>;
-               reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
-               te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt
deleted file mode 100644 (file)
index 9fb9ebe..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-Samsung s6e63m0 AMOLED LCD panel
-
-Required properties:
-  - compatible: "samsung,s6e63m0"
-  - reset-gpios: GPIO spec for reset pin
-  - vdd3-supply: VDD regulator
-  - vci-supply: VCI regulator
-
-The panel must obey rules for SPI slave device specified in document [1].
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
-               s6e63m0: display@0 {
-                       compatible = "samsung,s6e63m0";
-                       reg = <0>;
-                       reset-gpio = <&mp05 5 1>;
-                       vdd3-supply = <&ldo12_reg>;
-                       vci-supply = <&ldo11_reg>;
-                       spi-max-frequency = <1200000>;
-
-                       port {
-                               lcd_ep: endpoint {
-                                       remote-endpoint = <&fimd_ep>;
-                               };
-                       };
-               };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
new file mode 100644 (file)
index 0000000..1dab80a
--- /dev/null
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6e63m0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung s6e63m0 AMOLED LCD panel
+
+maintainers:
+  - Jonathan Bakker <xc-racer2@live.ca>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6e63m0
+
+  reg: true
+  reset-gpios: true
+  port: true
+
+  vdd3-supply:
+    description: VDD regulator
+
+  vci-supply:
+    description: VCI regulator
+
+required:
+  - compatible
+  - reset-gpios
+  - vdd3-supply
+  - vci-supply
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        display@0 {
+            compatible = "samsung,s6e63m0";
+            reg = <0>;
+            reset-gpios = <&mp05 5 1>;
+            vdd3-supply = <&ldo12_reg>;
+            vci-supply = <&ldo11_reg>;
+            spi-max-frequency = <1200000>;
+
+            port {
+                lcd_ep: endpoint {
+                    remote-endpoint = <&fimd_ep>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt
deleted file mode 100644 (file)
index aae57ef..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
-
-Required properties:
-- compatible: should be "sii,43wvf1g".
-- "dvdd-supply": 3v3 digital regulator.
-- "avdd-supply": 5v analog regulator.
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-Example:
-
-       panel {
-               compatible = "sii,43wvf1g";
-               backlight = <&backlight_display>;
-               dvdd-supply = <&reg_lcd_3v3>;
-               avdd-supply = <&reg_lcd_5v>;
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&display_out>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml
new file mode 100644 (file)
index 0000000..cfaa50c
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/seiko,43wvf1g.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
+
+maintainers:
+  - Marco Franchi <marco.franchi@nxp.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sii,43wvf1g
+
+  backlight: true
+  port: true
+
+  dvdd-supply:
+    description: 3v3 digital regulator
+
+  avdd-supply:
+    description: 5v analog regulator
+
+required:
+  - compatible
+  - dvdd-supply
+  - avdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    panel {
+        compatible = "sii,43wvf1g";
+
+        backlight = <&backlight_display>;
+        dvdd-supply = <&reg_lcd_3v3>;
+        avdd-supply = <&reg_lcd_5v>;
+        port {
+            panel_in: endpoint {
+                remote-endpoint = <&display_out>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt
deleted file mode 100644 (file)
index 0f57c31..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-Sharp 15" LQ150X1LG11 XGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq150x1lg11"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-Optional properties:
-- backlight: phandle of the backlight device
-- rlud-gpios: a single GPIO for the RL/UD (rotate 180 degrees) pin.
-- sellvds-gpios: a single GPIO for the SELLVDS pin.
-
-If rlud-gpios and/or sellvds-gpios are not specified, the RL/UD and/or SELLVDS
-pins are assumed to be handled appropriately by the hardware.
-
-Example:
-
-       backlight: backlight {
-               compatible = "pwm-backlight";
-               pwms = <&pwm 0 100000>;                      /* VBR */
-
-               brightness-levels = <0 20 40 60 80 100>;
-               default-brightness-level = <2>;
-
-               power-supply = <&vdd_12v_reg>;               /* VDD */
-               enable-gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;  /* XSTABY */
-       };
-
-       panel {
-               compatible = "sharp,lq150x1lg11";
-
-               power-supply = <&vcc_3v3_reg>;               /* VCC */
-
-               backlight = <&backlight>;
-               rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;    /* RL/UD */
-               sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; /* SELLVDS */
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml
new file mode 100644 (file)
index 0000000..92f2d12
--- /dev/null
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,lq150x1lg11.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp 15" LQ150X1LG11 XGA TFT LCD panel
+
+maintainers:
+  - Peter Rosin <peda@axentia.se>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,lq150x1lg11
+
+  power-supply: true
+  backlight: true
+
+  rlud-gpios:
+    maxItems: 1
+    description: |
+      GPIO for the RL/UD (rotate 180 degrees) pin.
+      If rlud-gpios and/or sellvds-gpios are not specified,
+      the RL/UD and/or SELLVDS pins are assumed to be handled
+      appropriately by the hardware.
+
+  sellvds-gpios:
+    maxItems: 1
+    description: |
+      GPIO for the SELLVDS pin.
+      If rlud-gpios and/or sellvds-gpios are not specified,
+      the RL/UD and/or SELLVDS pins are assumed to be handled
+      appropriately by the hardware.
+
+required:
+  - compatible
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    panel {
+        compatible = "sharp,lq150x1lg11";
+
+        power-supply = <&vcc_3v3_reg>;               /* VCC */
+
+        backlight = <&backlight>;
+        rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;    /* RL/UD */
+        sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; /* SELLVDS */
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt
deleted file mode 100644 (file)
index 0cc8981..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-SHARP LS037V7DW01 TFT-LCD panel
-===================================
-
-Required properties:
-- compatible: "sharp,ls037v7dw01"
-
-Optional properties:
-- label: a symbolic name for the panel
-- enable-gpios: a GPIO spec for the optional enable pin.
-  This pin is the INI pin as specified in the LS037V7DW01.pdf file.
-- reset-gpios: a GPIO spec for the optional reset pin.
-  This pin is the RESB pin as specified in the LS037V7DW01.pdf file.
-- mode-gpios: a GPIO
-  ordered MO, LR, and UD as specified in the LS037V7DW01.pdf file.
-
-Required nodes:
-- Video port for DPI input
-
-This panel can have zero to five GPIOs to configure to change configuration
-between QVGA and VGA mode and the scan direction. As these pins can be also
-configured with external pulls, all the GPIOs are considered optional with holes
-in the array.
-
-Example
--------
-
-Example when connected to a omap2+ based device:
-
-lcd0: display {
-       compatible = "sharp,ls037v7dw01";
-       power-supply = <&lcd_3v3>;
-       enable-gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;    /* gpio152, lcd INI */
-       reset-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;     /* gpio155, lcd RESB */
-       mode-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH        /* gpio154, lcd MO */
-                     &gpio1 2 GPIO_ACTIVE_HIGH         /* gpio2, lcd LR */
-                     &gpio1 3 GPIO_ACTIVE_HIGH>;       /* gpio3, lcd UD */
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml
new file mode 100644 (file)
index 0000000..8c47a9b
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls037v7dw01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SHARP LS037V7DW01 TFT-LCD panel
+
+description: |
+  This panel can have zero to five GPIOs to configure to change configuration
+  between QVGA and VGA mode and the scan direction. As these pins can be also
+  configured with external pulls, all the GPIOs are considered optional with holes
+  in the array.
+
+maintainers:
+  - Tony Lindgren <tony@atomide.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,ls037v7dw01
+
+  label: true
+  enable-gpios: true
+  reset-gpios: true
+  port: true
+  power-supply: true
+
+  mode-gpios:
+    minItems: 1
+    maxItems: 3
+    description: |
+      GPIO ordered MO, LR, and UD as specified in LS037V7DW01.pdf
+      This panel can have zero to three GPIOs to configure to
+      change configuration between QVGA and VGA mode and the
+      scan direction. As these pins can be also configured
+      with external pulls, all the GPIOs are considered
+      optional with holes in the array.
+
+required:
+  - compatible
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    lcd0: display {
+        compatible = "sharp,ls037v7dw01";
+        power-supply = <&lcd_3v3>;
+        enable-gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;    /* gpio152, lcd INI */
+        reset-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;     /* gpio155, lcd RESB */
+        mode-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH        /* gpio154, lcd MO */
+                      &gpio1 2 GPIO_ACTIVE_HIGH         /* gpio2, lcd LR */
+                      &gpio1 3 GPIO_ACTIVE_HIGH>;       /* gpio3, lcd UD */
+
+        port {
+            lcd_in: endpoint {
+                remote-endpoint = <&dpi_out>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
deleted file mode 100644 (file)
index 3770a11..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Sharp Microelectronics 4.3" qHD TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,ls043t1le01-qhd"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-- reset-gpios: a GPIO spec for the reset pin
-
-Example:
-
-       mdss_dsi@fd922800 {
-               panel@0 {
-                       compatible = "sharp,ls043t1le01-qhd";
-                       reg = <0>;
-                       avdd-supply = <&pm8941_l22>;
-                       backlight = <&pm8941_wled>;
-                       reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml
new file mode 100644 (file)
index 0000000..a90d0d8
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls043t1le01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 4.3" qHD TFT LCD panel
+
+maintainers:
+  - Werner Johansson <werner.johansson@sonymobile.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,ls043t1le01-qhd
+
+  reg: true
+  backlight: true
+  reset-gpios: true
+  port: true
+
+  avdd-supply:
+    description: phandle of the regulator that provides the supply voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,ls043t1le01-qhd";
+            reg = <0>;
+            avdd-supply = <&pm8941_l22>;
+            backlight = <&pm8941_wled>;
+            reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
deleted file mode 100644 (file)
index e11208f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-See panel-common.yaml in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
deleted file mode 100644 (file)
index ccd1759..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Sitronix ST7701 based LCD panels
-
-ST7701 designed for small and medium sizes of TFT LCD display, is
-capable of supporting up to 480RGBX864 in resolution. It provides
-several system interfaces like MIPI/RGB/SPI.
-
-Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has
-inbuilt ST7701 chip.
-
-Required properties:
-- compatible: must be "sitronix,st7701" and one of
-  * "techstar,ts8550b"
-- reset-gpios: a GPIO phandle for the reset pin
-
-Required properties for techstar,ts8550b:
-- reg: DSI virtual channel used by that screen
-- VCC-supply: analog regulator for MIPI circuit
-- IOVCC-supply: I/O system regulator
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-panel@0 {
-       compatible = "techstar,ts8550b", "sitronix,st7701";
-       reg = <0>;
-       VCC-supply = <&reg_dldo2>;
-       IOVCC-supply = <&reg_dldo2>;
-       reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
-       backlight = <&backlight>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
new file mode 100644 (file)
index 0000000..6dff59f
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sitronix,st7701.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7701 based LCD panels
+
+maintainers:
+  - Jagan Teki <jagan@amarulasolutions.com>
+
+description: |
+  ST7701 designed for small and medium sizes of TFT LCD display, is
+  capable of supporting up to 480RGBX864 in resolution. It provides
+  several system interfaces like MIPI/RGB/SPI.
+
+  Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has
+  inbuilt ST7701 chip.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - techstar,ts8550b
+      - const: sitronix,st7701
+
+  reg:
+    description: DSI virtual channel used by that screen
+    maxItems: 1
+
+  VCC-supply:
+    description: analog regulator for MIPI circuit
+
+  IOVCC-supply:
+    description: I/O system regulator
+
+  reset-gpios: true
+
+  backlight: true
+
+required:
+  - compatible
+  - reg
+  - VCC-supply
+  - IOVCC-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "techstar,ts8550b", "sitronix,st7701";
+            reg = <0>;
+            VCC-supply = <&reg_dldo2>;
+            IOVCC-supply = <&reg_dldo2>;
+            reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+            backlight = <&backlight>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
deleted file mode 100644 (file)
index c6995dd..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-Sitronix ST7789V RGB panel with SPI control bus
-
-Required properties:
-  - compatible: "sitronix,st7789v"
-  - reg: Chip select of the panel on the SPI bus
-  - reset-gpios: a GPIO phandle for the reset pin
-  - power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-  - backlight: phandle to the backlight used
-
-The generic bindings for the SPI slaves documented in [1] also applies
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/graph.txt
-
-Example:
-
-panel@0 {
-       compatible = "sitronix,st7789v";
-       reg = <0>;
-       reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
-       backlight = <&pwm_bl>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       port {
-               panel_input: endpoint {
-                       remote-endpoint = <&tcon0_out_panel>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
new file mode 100644 (file)
index 0000000..fa46d15
--- /dev/null
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sitronix,st7789v.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7789V RGB panel with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Maxime Ripard <mripard@kernel.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sitronix,st7789v
+
+  reg: true
+  reset-gpios: true
+  power-supply: true
+  backlight: true
+  port: true
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+  - power-supply
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sitronix,st7789v";
+            reg = <0>;
+            reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
+            backlight = <&pwm_bl>;
+            power-supply = <&power>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            port {
+                panel_input: endpoint {
+                    remote-endpoint = <&tcon0_out_panel>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt
deleted file mode 100644 (file)
index e123332..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Sony ACX565AKM SDI Panel
-========================
-
-Required properties:
-- compatible: "sony,acx565akm"
-
-Optional properties:
-- label: a symbolic name for the panel
-- reset-gpios: panel reset gpio
-
-Required nodes:
-- Video port for SDI input
-
-Example
--------
-
-acx565akm@2 {
-       compatible = "sony,acx565akm";
-       spi-max-frequency = <6000000>;
-       reg = <2>;
-
-       label = "lcd";
-       reset-gpios = <&gpio3 26 GPIO_ACTIVE_HIGH>; /* 90 */
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&sdi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
new file mode 100644 (file)
index 0000000..95d053c
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sony,acx565akm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony ACX565AKM SDI Panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sony,acx565akm
+
+  label: true
+  reset-gpios: true
+  port: true
+
+required:
+  - compatible
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@2 {
+            compatible = "sony,acx565akm";
+            spi-max-frequency = <6000000>;
+            reg = <2>;
+
+            label = "lcd";
+            reset-gpios = <&gpio3 26 GPIO_ACTIVE_HIGH>; /* 90 */
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&sdi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt b/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt
deleted file mode 100644 (file)
index 70cd8d1..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-Startek Electronic Technology Co. KD050C 5.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "startek,startek-kd050c"
diff --git a/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml b/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml
new file mode 100644 (file)
index 0000000..fd66864
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/startek,startek-kd050c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Startek Electronic Technology Co. KD050C 5.0" WVGA TFT LCD panel
+
+maintainers:
+  - Nikita Kiryanov <nikita@compulab.co.il>
+
+allOf:
+  - $ref: panel-dpi.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: startek,startek-kd050c
+      - {} # panel-dpi, but not listed here to avoid false select
+
+  backlight: true
+  enable-gpios: true
+  height-mm: true
+  label: true
+  panel-timing: true
+  port: true
+  power-supply: true
+  reset-gpios: true
+  width-mm: true
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
new file mode 100644 (file)
index 0000000..4aa6056
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/tpo,td.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toppoly TD Panels
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Marek Belisko <marek@goldelico.com>
+  - H. Nikolaus Schaller <hns@goldelico.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+        # Toppoly TD028TTEC1 Panel
+      - tpo,td028ttec1
+        # Toppoly TD043MTEA1 Panel
+      - tpo,td043mtea1
+
+  reg: true
+  label: true
+  reset-gpios: true
+  backlight: true
+  port: true
+
+required:
+  - compatible
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: panel@0 {
+            compatible = "tpo,td043mtea1";
+            reg = <0>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            label = "lcd";
+
+            reset-gpios = <&gpio7 7 0>;
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
deleted file mode 100644 (file)
index 898e06e..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Toppoly TD028TTEC1 Panel
-========================
-
-Required properties:
-- compatible: "tpo,td028ttec1"
-
-Optional properties:
-- label: a symbolic name for the panel
-- backlight: phandle of the backlight device
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: td028ttec1@0 {
-       compatible = "tpo,td028ttec1";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-       backlight = <&backlight>;
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
-
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt
deleted file mode 100644 (file)
index ec6d629..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-TPO TD043MTEA1 Panel
-====================
-
-Required properties:
-- compatible: "tpo,td043mtea1"
-- reset-gpios: panel reset gpio
-
-Optional properties:
-- label: a symbolic name for the panel
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: panel@0 {
-       compatible = "tpo,td043mtea1";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-
-       reset-gpios = <&gpio7 7 0>;
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
new file mode 100644 (file)
index 0000000..b36f39f
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,rm69299.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox model RM69299 Panels
+
+maintainers:
+  - Harigovindan P <harigovi@codeaurora.org>
+
+description: |
+  This binding is for display panels using a Visionox RM69299 panel.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: visionox,rm69299-1080p-display
+
+  vdda-supply:
+    description: |
+      Phandle of the regulator that provides the vdda supply voltage.
+
+  vdd3p3-supply:
+    description: |
+      Phandle of the regulator that provides the vdd3p3 supply voltage.
+
+  port: true
+  reset-gpios: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - vdda-supply
+  - vdd3p3-supply
+  - reset-gpios
+  - port
+
+examples:
+  - |
+    panel {
+        compatible = "visionox,rm69299-1080p-display";
+
+        vdda-supply = <&src_pp1800_l8c>;
+        vdd3p3-supply = <&src_pp2800_l18a>;
+
+        reset-gpios = <&pm6150l_gpio 3 0>;
+        port {
+            panel0_in: endpoint {
+                remote-endpoint = <&dsi0_out>;
+            };
+        };
+    };
+...
index d9fdb58e06b4218bc1db4d2b9b8534085903f336..6913923df5695ccd453a7597ebf532f18c509c78 100644 (file)
@@ -37,7 +37,6 @@ examples:
     dsi {
         #address-cells = <1>;
         #size-cells = <0>;
-        reg = <0xff450000 0x1000>;
 
         panel@0 {
             compatible = "xinpeng,xpp055c272";
index eb4ae41fe41f83c73d0269146e20d55f0349b115..51cd4d1627703a154ce3318f0df9006636c1c564 100644 (file)
@@ -50,6 +50,14 @@ Required Properties:
     VSP instance that serves the DU channel, and the channel index identifies
     the LIF instance in that VSP.
 
+Optional properties:
+  - resets: A list of phandle + reset-specifier pairs, one for each entry in
+    the reset-names property.
+  - reset-names: Names of the resets. This property is model-dependent.
+    - All but R8A7779 use one reset for a group of one or more successive
+      channels. The resets must be named "du.x" with "x" being the numerical
+      index of the lowest channel in the group.
+
 Required nodes:
 
 The connections to the DU output video ports are modeled using the OF graph
@@ -96,6 +104,8 @@ Example: R8A7795 (R-Car H3) ES2.0 DU
                         <&cpg CPG_MOD 722>,
                         <&cpg CPG_MOD 721>;
                clock-names = "du.0", "du.1", "du.2", "du.3";
+               resets = <&cpg 724>, <&cpg 722>;
+               reset-names = "du.0", "du.2";
                renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
                renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
 
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt
deleted file mode 100644 (file)
index d1ad31b..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-Rockchip specific extensions for rk3066 HDMI
-============================================
-
-Required properties:
-- compatible:
-       "rockchip,rk3066-hdmi";
-- reg:
-       Physical base address and length of the controller's registers.
-- clocks, clock-names:
-       Phandle to HDMI controller clock, name should be "hclk".
-- interrupts:
-       HDMI interrupt number.
-- power-domains:
-       Phandle to the RK3066_PD_VIO power domain.
-- rockchip,grf:
-       This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
-- ports:
-       Contains one port node with two endpoints, numbered 0 and 1,
-       connected respectively to vop0 and vop1.
-       Contains one port node with one endpoint
-       connected to a hdmi-connector node.
-- pinctrl-0, pinctrl-name:
-       Switch the iomux for the HPD/I2C pins to HDMI function.
-
-Example:
-       hdmi: hdmi@10116000 {
-               compatible = "rockchip,rk3066-hdmi";
-               reg = <0x10116000 0x2000>;
-               interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru HCLK_HDMI>;
-               clock-names = "hclk";
-               power-domains = <&power RK3066_PD_VIO>;
-               rockchip,grf = <&grf>;
-               pinctrl-names = "default";
-               pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       hdmi_in: port@0 {
-                               reg = <0>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               hdmi_in_vop0: endpoint@0 {
-                                       reg = <0>;
-                                       remote-endpoint = <&vop0_out_hdmi>;
-                               };
-                               hdmi_in_vop1: endpoint@1 {
-                                       reg = <1>;
-                                       remote-endpoint = <&vop1_out_hdmi>;
-                               };
-                       };
-                       hdmi_out: port@1 {
-                               reg = <1>;
-                               hdmi_out_con: endpoint {
-                                       remote-endpoint = <&hdmi_con_in>;
-                               };
-                       };
-               };
-       };
-
-&pinctrl {
-               hdmi {
-                       hdmi_hpd: hdmi-hpd {
-                               rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
-                       };
-                       hdmii2c_xfer: hdmii2c-xfer {
-                               rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
-                                               <0 RK_PA2 1 &pcfg_pull_none>;
-                       };
-               };
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
new file mode 100644 (file)
index 0000000..4110d00
--- /dev/null
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,rk3066-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip rk3066 HDMI controller
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+  compatible:
+    const: rockchip,rk3066-hdmi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: hclk
+
+  pinctrl-0:
+    maxItems: 2
+
+  pinctrl-names:
+    const: default
+    description:
+      Switch the iomux for the HPD/I2C pins to HDMI function.
+
+  power-domains:
+    maxItems: 1
+
+  rockchip,grf:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
+
+  ports:
+    type: object
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description:
+          Port node with two endpoints, numbered 0 and 1,
+          connected respectively to vop0 and vop1.
+
+      port@1:
+        type: object
+        description:
+          Port node with one endpoint connected to a hdmi-connector node.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - pinctrl-0
+  - pinctrl-names
+  - power-domains
+  - rockchip,grf
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3066a-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/pinctrl/rockchip.h>
+    #include <dt-bindings/power/rk3066-power.h>
+    hdmi: hdmi@10116000 {
+      compatible = "rockchip,rk3066-hdmi";
+      reg = <0x10116000 0x2000>;
+      interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru HCLK_HDMI>;
+      clock-names = "hclk";
+      pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+      pinctrl-names = "default";
+      power-domains = <&power RK3066_PD_VIO>;
+      rockchip,grf = <&grf>;
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        hdmi_in: port@0 {
+          reg = <0>;
+          #address-cells = <1>;
+          #size-cells = <0>;
+          hdmi_in_vop0: endpoint@0 {
+            reg = <0>;
+            remote-endpoint = <&vop0_out_hdmi>;
+          };
+          hdmi_in_vop1: endpoint@1 {
+            reg = <1>;
+            remote-endpoint = <&vop1_out_hdmi>;
+          };
+        };
+        hdmi_out: port@1 {
+          reg = <1>;
+          hdmi_out_con: endpoint {
+            remote-endpoint = <&hdmi_con_in>;
+          };
+        };
+      };
+    };
+
+    pinctrl {
+      hdmi {
+        hdmi_hpd: hdmi-hpd {
+          rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
+        };
+        hdmii2c_xfer: hdmii2c-xfer {
+          rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
+                          <0 RK_PA2 1 &pcfg_pull_none>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
deleted file mode 100644 (file)
index 8b3a5f5..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-device-tree bindings for rockchip soc display controller (vop)
-
-VOP (Visual Output Processor) is the Display Controller for the Rockchip
-series of SoCs which transfers the image data from a video memory
-buffer to an external LCD interface.
-
-Required properties:
-- compatible: value should be one of the following
-               "rockchip,rk3036-vop";
-               "rockchip,rk3126-vop";
-               "rockchip,px30-vop-lit";
-               "rockchip,px30-vop-big";
-               "rockchip,rk3066-vop";
-               "rockchip,rk3188-vop";
-               "rockchip,rk3288-vop";
-               "rockchip,rk3368-vop";
-               "rockchip,rk3366-vop";
-               "rockchip,rk3399-vop-big";
-               "rockchip,rk3399-vop-lit";
-               "rockchip,rk3228-vop";
-               "rockchip,rk3328-vop";
-
-- reg: Must contain one entry corresponding to the base address and length
-       of the register space. Can optionally contain a second entry
-       corresponding to the CRTC gamma LUT address.
-
-- interrupts: should contain a list of all VOP IP block interrupts in the
-                order: VSYNC, LCD_SYSTEM. The interrupt specifier
-                format depends on the interrupt controller used.
-
-- clocks: must include clock specifiers corresponding to entries in the
-               clock-names property.
-
-- clock-names: Must contain
-               aclk_vop: for ddr buffer transfer.
-               hclk_vop: for ahb bus to R/W the phy regs.
-               dclk_vop: pixel clock.
-
-- resets: Must contain an entry for each entry in reset-names.
-  See ../reset/reset.txt for details.
-- reset-names: Must include the following entries:
-  - axi
-  - ahb
-  - dclk
-
-- iommus: required a iommu node
-
-- port: A port node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-SoC specific DT entry:
-       vopb: vopb@ff930000 {
-               compatible = "rockchip,rk3288-vop";
-               reg = <0x0 0xff930000 0x0 0x19c>, <0x0 0xff931000 0x0 0x1000>;
-               interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
-               clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
-               resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
-               reset-names = "axi", "ahb", "dclk";
-               iommus = <&vopb_mmu>;
-               vopb_out: port {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       vopb_out_edp: endpoint@0 {
-                               reg = <0>;
-                               remote-endpoint=<&edp_in_vopb>;
-                       };
-                       vopb_out_hdmi: endpoint@1 {
-                               reg = <1>;
-                               remote-endpoint=<&hdmi_in_vopb>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
new file mode 100644 (file)
index 0000000..1695e3e
--- /dev/null
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip-vop.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC display controller (VOP)
+
+description:
+  VOP (Video Output Processor) is the display controller for the Rockchip
+  series of SoCs which transfers the image data from a video memory
+  buffer to an external LCD interface.
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+  compatible:
+    enum:
+      - rockchip,px30-vop-big
+      - rockchip,px30-vop-lit
+      - rockchip,rk3036-vop
+      - rockchip,rk3066-vop
+      - rockchip,rk3126-vop
+      - rockchip,rk3188-vop
+      - rockchip,rk3228-vop
+      - rockchip,rk3288-vop
+      - rockchip,rk3328-vop
+      - rockchip,rk3366-vop
+      - rockchip,rk3368-vop
+      - rockchip,rk3399-vop-big
+      - rockchip,rk3399-vop-lit
+
+  reg:
+    minItems: 1
+    items:
+      - description:
+          Must contain one entry corresponding to the base address and length
+          of the register space.
+      - description:
+          Can optionally contain a second entry corresponding to
+          the CRTC gamma LUT address.
+
+  interrupts:
+    maxItems: 1
+    description:
+      The VOP interrupt is shared by several interrupt sources, such as
+      frame start (VSYNC), line flag and other status interrupts.
+
+  clocks:
+    items:
+      - description: Clock for ddr buffer transfer.
+      - description: Pixel clock.
+      - description: Clock for the ahb bus to R/W the phy regs.
+
+  clock-names:
+    items:
+      - const: aclk_vop
+      - const: dclk_vop
+      - const: hclk_vop
+
+  resets:
+    maxItems: 3
+
+  reset-names:
+    items:
+      - const: axi
+      - const: ahb
+      - const: dclk
+
+  port:
+    type: object
+    description:
+      A port node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+  assigned-clocks:
+    maxItems: 2
+
+  assigned-clock-rates:
+    maxItems: 2
+
+  iommus:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3288-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/rk3288-power.h>
+    vopb: vopb@ff930000 {
+      compatible = "rockchip,rk3288-vop";
+      reg = <0x0 0xff930000 0x0 0x19c>,
+            <0x0 0xff931000 0x0 0x1000>;
+      interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru ACLK_VOP0>,
+               <&cru DCLK_VOP0>,
+               <&cru HCLK_VOP0>;
+      clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+      power-domains = <&power RK3288_PD_VIO>;
+      resets = <&cru SRST_LCDC1_AXI>,
+               <&cru SRST_LCDC1_AHB>,
+               <&cru SRST_LCDC1_DCLK>;
+      reset-names = "axi", "ahb", "dclk";
+      iommus = <&vopb_mmu>;
+      vopb_out: port {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        vopb_out_edp: endpoint@0 {
+          reg = <0>;
+          remote-endpoint=<&edp_in_vopb>;
+        };
+        vopb_out_hdmi: endpoint@1 {
+          reg = <1>;
+          remote-endpoint=<&hdmi_in_vopb>;
+        };
+      };
+    };
index b38ee732efa9fd5f857b207c8941ca17acf83c5f..cd17684aaab5bcd38bb8b1d7644601ae0a0186a1 100644 (file)
@@ -1,4 +1,4 @@
-Analog Device AXI-DMAC DMA controller
+Analog Devices AXI-DMAC DMA controller
 
 Required properties:
  - compatible: Must be "adi,axi-dmac-1.00.a".
index 57a240d2d02694b8d3d2547390bc5eb9654c5dc8..7db78767c02d0dba38d2c40e7c9de59ddafb2af0 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2019 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/hwmon/adi,axi-fan-control.yaml#
+$id: http://devicetree.org/schemas/hwmon/adi,axi-fan-control.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AXI FAN Control Device Tree Bindings
@@ -47,7 +47,7 @@ required:
 
 examples:
   - |
-    fpga_axi: fpga-axi@0 {
+    fpga_axi: fpga-axi {
             #address-cells = <0x2>;
             #size-cells = <0x1>;
 
index 76985034ea7351de10b1de07a0b0351e8b6b1a88..46c441574f98b592e651b57fe34c885a9d41a8a0 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/adt7475.yaml#
+$id: http://devicetree.org/schemas/hwmon/adt7475.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: ADT7475 hwmon sensor
index f0bbd7e1029bd44d7c2c989217cbfc0b144635f9..502e1e55adbdebd08d588350ba8a2f57b28a8696 100644 (file)
@@ -1,4 +1,4 @@
-* Analog Device AD5755 IIO Multi-Channel DAC Linux Driver
+* Analog Devices AD5755 IIO Multi-Channel DAC Linux Driver
 
 Required properties:
  - compatible: Has to contain one of the following:
index d9c25cf4b92f2f17cc4488d5f8cde3ef74eb0caf..58d81ca434605be574fecf06d75324c0e0bddb7c 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2020 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/iio/dac/adi,ad5770r.yaml#
+$id: http://devicetree.org/schemas/iio/dac/adi,ad5770r.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AD5770R DAC device driver
@@ -49,93 +49,86 @@ properties:
       asserted during driver probe.
     maxItems: 1
 
-  channel0:
+  channel@0:
     description: Represents an external channel which are
       connected to the DAC. Channel 0 can act both as a current
       source and sink.
     type: object
 
     properties:
-      num:
+      reg:
         description: This represents the channel number.
-        items:
-          const: 0
+        const: 0
 
       adi,range-microamp:
           description: Output range of the channel.
           oneOf:
-            - $ref: /schemas/types.yaml#/definitions/int32-array
             - items:
-                - enum: [0 300000]
-                - enum: [-60000 0]
-                - enum: [-60000 300000]
+                - const: 0
+                - const: 300000
+            - items:
+                - const: -60000
+                - const: 0
+            - items:
+                - const: -60000
+                - const: 300000
 
-  channel1:
+  channel@1:
     description: Represents an external channel which are
       connected to the DAC.
     type: object
 
     properties:
-      num:
+      reg:
         description: This represents the channel number.
-        items:
-          const: 1
+        const: 1
 
       adi,range-microamp:
           description: Output range of the channel.
-          oneOf:
-            - $ref: /schemas/types.yaml#/definitions/uint32-array
-            - items:
-                - enum: [0 140000]
-                - enum: [0 250000]
+          items:
+            - const: 0
+            - enum: [ 140000, 250000 ]
 
-  channel2:
+  channel@2:
     description: Represents an external channel which are
       connected to the DAC.
     type: object
 
     properties:
-      num:
+      reg:
         description: This represents the channel number.
-        items:
-          const: 2
+        const: 2
 
       adi,range-microamp:
           description: Output range of the channel.
-          oneOf:
-            - $ref: /schemas/types.yaml#/definitions/uint32-array
-            - items:
-                - enum: [0 140000]
-                - enum: [0 250000]
+          items:
+            - const: 0
+            - enum: [ 55000, 150000 ]
 
 patternProperties:
   "^channel@([3-5])$":
     type: object
     description: Represents the external channels which are connected to the DAC.
     properties:
-      num:
+      reg:
         description: This represents the channel number.
-        items:
-          minimum: 3
-          maximum: 5
+        minimum: 3
+        maximum: 5
 
       adi,range-microamp:
           description: Output range of the channel.
-          oneOf:
-            - $ref: /schemas/types.yaml#/definitions/uint32-array
-            - items:
-                - enum: [0 45000]
-                - enum: [0 100000]
+          items:
+            - const: 0
+            - enum: [ 45000, 100000 ]
 
 required:
 - reg
-- diff-channels
-- channel0
-- channel1
-- channel2
-- channel3
-- channel4
-- channel5
+- channel@0
+- channel@1
+- channel@2
+- channel@3
+- channel@4
+- channel@5
 
 examples:
   - |
@@ -144,40 +137,42 @@ examples:
                 #size-cells = <0>;
 
                 ad5770r@0 {
-                        compatible = "ad5770r";
+                        compatible = "adi,ad5770r";
                         reg = <0>;
                         spi-max-frequency = <1000000>;
                         vref-supply = <&vref>;
                         adi,external-resistor;
                         reset-gpios = <&gpio 22 0>;
+                        #address-cells = <1>;
+                        #size-cells = <0>;
 
                         channel@0 {
-                                num = <0>;
-                                adi,range-microamp = <(-60000) 300000>;
+                                reg = <0>;
+                                adi,range-microamp = <0 300000>;
                         };
 
                         channel@1 {
-                                num = <1>;
+                                reg = <1>;
                                 adi,range-microamp = <0 140000>;
                         };
 
                         channel@2 {
-                                num = <2>;
+                                reg = <2>;
                                 adi,range-microamp = <0 55000>;
                         };
 
                         channel@3 {
-                                num = <3>;
+                                reg = <3>;
                                 adi,range-microamp = <0 45000>;
                         };
 
                         channel@4 {
-                                num = <4>;
+                                reg = <4>;
                                 adi,range-microamp = <0 45000>;
                         };
 
                         channel@5 {
-                                num = <5>;
+                                reg = <5>;
                                 adi,range-microamp = <0 45000>;
                         };
                 };
index 8d58709d4b478368215210189c96f054b71611ca..383d64a918547ea1c851d2c6b19982c94e39f3a6 100644 (file)
@@ -109,7 +109,7 @@ examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
-    i2c@00000000 {
+    i2c {
       #address-cells = <1>;
       #size-cells = <0>;
       edt-ft5x06@38 {
index 9c6b91fee477acd5fca5b2124e56131a1edbadae..26f1fcf0857aa1348b03bfc0831e415dc83aa166 100644 (file)
@@ -56,9 +56,8 @@ properties:
       cell with zero.
     allOf:
       - $ref: /schemas/types.yaml#/definitions/uint32-array
-      - items:
-          minItems: 4
-          maxItems: 4
+      - minItems: 4
+        maxItems: 4
 
 
 required:
index 12516bd89cf9696812ed9298933177ce4ddf39ec..611bda38d1872b45282537f81e07c6027b7141ff 100644 (file)
@@ -97,30 +97,35 @@ examples:
     #include <dt-bindings/clock/tegra186-clock.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-    memory-controller@2c00000 {
-        compatible = "nvidia,tegra186-mc";
-        reg = <0x0 0x02c00000 0x0 0xb0000>;
-        interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
-
+    bus {
         #address-cells = <2>;
         #size-cells = <2>;
 
-        ranges = <0x0 0x02c00000 0x02c00000 0x0 0xb0000>;
+        memory-controller@2c00000 {
+            compatible = "nvidia,tegra186-mc";
+            reg = <0x0 0x02c00000 0x0 0xb0000>;
+            interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+
+            #address-cells = <2>;
+            #size-cells = <2>;
+
+            ranges = <0x0 0x02c00000 0x0 0x02c00000 0x0 0xb0000>;
 
-        /*
-         * Memory clients have access to all 40 bits that the memory
-         * controller can address.
-         */
-        dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
+            /*
+             * Memory clients have access to all 40 bits that the memory
+             * controller can address.
+             */
+            dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
-        external-memory-controller@2c60000 {
-            compatible = "nvidia,tegra186-emc";
-            reg = <0x0 0x02c60000 0x0 0x50000>;
-            interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
-            clocks = <&bpmp TEGRA186_CLK_EMC>;
-            clock-names = "emc";
+            external-memory-controller@2c60000 {
+                compatible = "nvidia,tegra186-emc";
+                reg = <0x0 0x02c60000 0x0 0x50000>;
+                interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+                clocks = <&bpmp TEGRA186_CLK_EMC>;
+                clock-names = "emc";
 
-            nvidia,bpmp = <&bpmp>;
+                nvidia,bpmp = <&bpmp>;
+            };
         };
     };
 
index aa922c560fccbdf6041599d2ee6661cd5b95bd86..65018a019e1db316b442bfe32ac025e2225d33ff 100644 (file)
@@ -123,7 +123,9 @@ examples:
     #include <dt-bindings/leds/common.h>
 
     i2c {
-      pmic: pmic@4b {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        pmic: pmic@4b {
             compatible = "rohm,bd71837";
             reg = <0x4b>;
             interrupt-parent = <&gpio1>;
index 402e40dfe0b85b0200aca2b9534f4d5e9b56979c..77bcca2d414f8bda4ed4fe49c2ad183e0b1b3875 100644 (file)
@@ -128,7 +128,9 @@ examples:
     #include <dt-bindings/leds/common.h>
 
     i2c {
-      pmic: pmic@4b {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        pmic: pmic@4b {
             compatible = "rohm,bd71847";
             reg = <0x4b>;
             interrupt-parent = <&gpio1>;
index d9ad9260e348a826777ceed3d91b55b9feb86904..f88d13d7044137e09c382caea88fb5e503a2b73a 100644 (file)
@@ -274,7 +274,7 @@ examples:
   - |
     #include <dt-bindings/mfd/st,stpmic1.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
-    i2c@0 {
+    i2c {
       #address-cells = <1>;
       #size-cells = <0>;
       pmic@33 {
index 8927941c74bb836e87b70d7fba641ca993add157..5aa141ccc1137abd1780f84104a7c7c5d869141c 100644 (file)
@@ -43,6 +43,9 @@ properties:
           second group of digits is the Phy Identifier 2 register,
           this is the chip vendor OUI bits 19:24, followed by 10
           bits of a vendor specific ID.
+      - items:
+          - pattern: "^ethernet-phy-id[a-f0-9]{4}\\.[a-f0-9]{4}$"
+          - const: ethernet-phy-ieee802.3-c22
       - items:
           - pattern: "^ethernet-phy-id[a-f0-9]{4}\\.[a-f0-9]{4}$"
           - const: ethernet-phy-ieee802.3-c45
index 5b88fae0307d153e50a6faac33d6152bdf2711d4..ff8b0f211aa190ba24a42ec71e8d941e4d2f7f6f 100644 (file)
@@ -22,6 +22,8 @@ Optional properties:
 - fsl,err006687-workaround-present: If present indicates that the system has
   the hardware workaround for ERR006687 applied and does not need a software
   workaround.
+- gpr: phandle of SoC general purpose register mode. Required for wake on LAN
+  on some SoCs
  -interrupt-names:  names of the interrupts listed in interrupts property in
   the same order. The defaults if not specified are
   __Number of interrupts__   __Default__
index b9f90081046f76838c02fc2ff1d46aa30e7c1bc1..67df3fe861eed9dcc993eb54d66862f2d557d231 100644 (file)
@@ -48,6 +48,7 @@ examples:
 
         switch@10 {
             compatible = "qca,qca8337";
+            reg = <0x10>;
             /* ... */
         };
     };
index beca6466d59a9d7d1f9b6afaffa1e6d246c5352a..d2202791c1d4c0c57eb829ed389ddaf75e951792 100644 (file)
@@ -29,7 +29,7 @@ Required properties for compatible string qcom,wcn399x-bt:
 
 Optional properties for compatible string qcom,wcn399x-bt:
 
- - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+ - max-speed: see Documentation/devicetree/bindings/serial/serial.yaml
  - firmware-name: specify the name of nvm firmware to load
  - clocks: clock provided to the controller
 
index fd1982c5610467aa7753ee0af2e35d6576bbdab5..3f913d6d1c3d8beec2b6e8a9764d76094e05bab2 100644 (file)
@@ -146,7 +146,7 @@ patternProperties:
       bindings specified in
       Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
       Torrent SERDES should follow the bindings specified in
-      Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
+      Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
 
 required:
   - compatible
index 24c217b7658032867bcf9cf8da55f0ca5ba12f69..41ece1d853152efcc30ddbc28dfa36c0deb036f0 100644 (file)
@@ -31,10 +31,17 @@ additionalProperties: false
 
 examples:
   - |
-    cros-ec@0 {
-        compatible = "google,cros-ec-spi";
-        cros_ec_pwm: ec-pwm {
-            compatible = "google,cros-ec-pwm";
-            #pwm-cells = <1>;
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cros-ec@0 {
+            compatible = "google,cros-ec-spi";
+            reg = <0>;
+
+            cros_ec_pwm: ec-pwm {
+                compatible = "google,cros-ec-pwm";
+                #pwm-cells = <1>;
+            };
         };
     };
index 89ab67f20a7fe9a57834785229b58b2461551eea..c147900f9041ab93ed9ba0031551456fa8ee9ed6 100644 (file)
@@ -39,7 +39,7 @@ additionalProperties: false
 
 examples:
   - |
-    rng {
+    rng@7e104000 {
         compatible = "brcm,bcm2835-rng";
         reg = <0x7e104000 0x10>;
         interrupts = <2 29>;
index 0cf470eaf2a07e62d9a6fab26f2f2a6ccc33693d..5c16cf59ca00523582247a0a70001b140e16492c 100644 (file)
@@ -61,7 +61,7 @@ examples:
     #include <dt-bindings/clock/qcom,gcc-sdm845.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-    soc: soc@0 {
+    soc: soc {
         #address-cells = <2>;
         #size-cells = <2>;
 
index 1d687787507734fcffece11bfac717da5e9fe39d..c2d2ee43ba67e4c02c69525ac4bae5b9448cba0b 100644 (file)
@@ -56,7 +56,7 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/clock/jz4740-cgu.h>
-    usb_phy: usb-phy@0 {
+    usb_phy: usb-phy {
       compatible = "usb-nop-xceiv";
       #phy-cells = <0>;
     };
index cb695aa3fba4ccdc13988514949a84d51b9abe7c..fbdd0175675209bb58267bc0fee2d5365d917e33 100644 (file)
@@ -52,8 +52,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt   - USB3 QMP PHY
-Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt - USB2 QUSB2 PHY
+Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt    - USB3 QMP PHY
+Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml - USB2 QUSB2 PHY
 
 Example device nodes:
 
index c8c4b00ecb941fe85144fb3efd8c5cfa4ec0e5e4..94520493233bb51ae663cc3b30f6ea1816474bd2 100644 (file)
@@ -16,7 +16,7 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml - USB2.0 PHY
 Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt     - Type-C PHY
 
 Example device nodes:
index d3891386d6710effe562acad382bf71f7fa16411..7a39732c582d1248eaed218f16a2c435ba881e65 100644 (file)
@@ -187,6 +187,8 @@ patternProperties:
     description: ChipOne
   "^chipspark,.*":
     description: ChipSPARK
+  "^chrontel,.*":
+    description: Chrontel, Inc.
   "^chrp,.*":
     description: Common Hardware Reference Platform
   "^chunghwa,.*":
@@ -463,6 +465,8 @@ patternProperties:
     description: Infineon Technologies
   "^inforce,.*":
     description: Inforce Computing
+  "^ivo,.*":
+    description: InfoVision Optoelectronics Kunshan Co. Ltd.
   "^ingenic,.*":
     description: Ingenic Semiconductor
   "^innolux,.*":
@@ -488,7 +492,7 @@ patternProperties:
   "^issi,.*":
     description: Integrated Silicon Solutions Inc.
   "^ite,.*":
-    description: ITE Tech, Inc.
+    description: ITE Tech. Inc.
   "^itead,.*":
     description: ITEAD Intelligent Systems Co.Ltd
   "^iwave,.*":
@@ -1039,6 +1043,8 @@ patternProperties:
     description: Tronsmart
   "^truly,.*":
     description: Truly Semiconductors Limited
+  "^visionox,.*":
+    description: Visionox
   "^tsd,.*":
     description: Theobroma Systems Design und Consulting GmbH
   "^tyan,.*":
index c78db28519f740127a02ae956085e7a4ca9113b1..63dec76d1d8d0dfe7530f5965223f13e21107d56 100644 (file)
@@ -11,7 +11,7 @@ course not limited to GPU use cases.
 The three main components of this are: (1) dma-buf, representing a
 sg_table and exposed to userspace as a file descriptor to allow passing
 between devices, (2) fence, which provides a mechanism to signal when
-one device as finished access, and (3) reservation, which manages the
+one device has finished access, and (3) reservation, which manages the
 shared or exclusive fence(s) associated with the buffer.
 
 Shared DMA Buffers
@@ -31,7 +31,7 @@ The exporter
  - implements and manages operations in :c:type:`struct dma_buf_ops
    <dma_buf_ops>` for the buffer,
  - allows other users to share the buffer by using dma_buf sharing APIs,
- - manages the details of buffer allocation, wrapped int a :c:type:`struct
+ - manages the details of buffer allocation, wrapped in a :c:type:`struct
    dma_buf <dma_buf>`,
  - decides about the actual backing storage where this allocation happens,
  - and takes care of any migration of scatterlist - for all (shared) users of
index 0efede580039cdcbb074983521731f59d3a7b946..4cc74325bf91417d5523de7a7e23b5477325aaf2 100644 (file)
@@ -202,3 +202,91 @@ busy_percent
 
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
    :doc: busy_percent
+
+GPU Product Information
+=======================
+
+Information about the GPU can be obtained on certain cards
+via sysfs
+
+product_name
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_name
+
+product_number
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_name
+
+serial_number
+-------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: serial_number
+
+unique_id
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: unique_id
+
+GPU Memory Usage Information
+============================
+
+Various memory accounting can be accessed via sysfs
+
+mem_info_vram_total
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_total
+
+mem_info_vram_used
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_used
+
+mem_info_vis_vram_total
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_total
+
+mem_info_vis_vram_used
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_used
+
+mem_info_gtt_total
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_total
+
+mem_info_gtt_used
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_used
+
+PCIe Accounting Information
+===========================
+
+pcie_bw
+-------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pcie_bw
+
+pcie_replay_count
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: pcie_replay_count
+
+
index a73320576ca9a18e71705694dd3c833c83b62a47..12272b168580be9aa87e86644c3d94770351a2b3 100644 (file)
@@ -132,6 +132,18 @@ be unmapped; on many devices, the ROM address decoder is shared with
 other BARs, so leaving it mapped could cause undesired behaviour like
 hangs or memory corruption.
 
+Managed Resources
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_managed.c
+   :doc: managed resources
+
+.. kernel-doc:: drivers/gpu/drm/drm_managed.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_managed.h
+   :internal:
+
 Bus-specific Device Registration and PCI Support
 ------------------------------------------------
 
index 906771e03103240fd6fa2051631ff45ce7358ef7..397314d08f77f573de8922fad53d5ab2425d836c 100644 (file)
@@ -3,7 +3,7 @@ Kernel Mode Setting (KMS)
 =========================
 
 Drivers must initialize the mode setting core by calling
-drm_mode_config_init() on the DRM device. The function
+drmm_mode_config_init() on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
 mode_config field and never fails. Once done, mode configuration must
 be setup by initializing the following fields.
@@ -397,6 +397,9 @@ Connector Functions Reference
 Writeback Connectors
 --------------------
 
+.. kernel-doc:: include/drm/drm_writeback.h
+  :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_writeback.c
   :doc: overview
 
index c77b326012606413493c76efd52d7622eebba296..1839762044be18f7583b2d28955a375d814347d6 100644 (file)
@@ -373,15 +373,6 @@ GEM CMA Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
    :export:
 
-VRAM Helper Function Reference
-==============================
-
-.. kernel-doc:: drivers/gpu/drm/drm_vram_helper_common.c
-   :doc: overview
-
-.. kernel-doc:: include/drm/drm_gem_vram_helper.h
-   :internal:
-
 GEM VRAM Helper Functions Reference
 -----------------------------------
 
index 439656f55c5da8ec957f2da313d1ca430bd59714..658b52f7ffc6c3c667987a465810b059f1ed733d 100644 (file)
@@ -347,18 +347,6 @@ Contact: Sean Paul
 
 Level: Starter
 
-Remove drm_display_mode.hsync
------------------------------
-
-We have drm_mode_hsync() to calculate this from hsync_start/end, since drivers
-shouldn't/don't use this, remove this member to avoid any temptations to use it
-in the future. If there is any debug code using drm_display_mode.hsync, convert
-it to use drm_mode_hsync() instead.
-
-Contact: Sean Paul
-
-Level: Starter
-
 connector register/unregister fixes
 -----------------------------------
 
index cc4b61447b63b135394a084742d70b24630eed57..0e71b22047f897b6510e52b56d6065c38d1523f1 100644 (file)
@@ -16,7 +16,7 @@ Supported chips:
 
   * Renesas ISL68220
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl68220'
 
     Addresses scanned: -
 
@@ -26,7 +26,7 @@ Supported chips:
 
   * Renesas ISL68221
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl68221'
 
     Addresses scanned: -
 
@@ -36,7 +36,7 @@ Supported chips:
 
   * Renesas ISL68222
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl68222'
 
     Addresses scanned: -
 
@@ -46,7 +46,7 @@ Supported chips:
 
   * Renesas ISL68223
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl68223'
 
     Addresses scanned: -
 
@@ -56,7 +56,7 @@ Supported chips:
 
   * Renesas ISL68224
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl68224'
 
     Addresses scanned: -
 
@@ -66,7 +66,7 @@ Supported chips:
 
   * Renesas ISL68225
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl68225'
 
     Addresses scanned: -
 
@@ -76,7 +76,7 @@ Supported chips:
 
   * Renesas ISL68226
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl68226'
 
     Addresses scanned: -
 
@@ -86,7 +86,7 @@ Supported chips:
 
   * Renesas ISL68227
 
-    Prefix: 'raa_dmpvr2_1rail'
+    Prefix: 'isl68227'
 
     Addresses scanned: -
 
@@ -96,7 +96,7 @@ Supported chips:
 
   * Renesas ISL68229
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl68229'
 
     Addresses scanned: -
 
@@ -106,7 +106,7 @@ Supported chips:
 
   * Renesas ISL68233
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl68233'
 
     Addresses scanned: -
 
@@ -116,7 +116,7 @@ Supported chips:
 
   * Renesas ISL68239
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl68239'
 
     Addresses scanned: -
 
@@ -126,7 +126,7 @@ Supported chips:
 
   * Renesas ISL69222
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69222'
 
     Addresses scanned: -
 
@@ -136,7 +136,7 @@ Supported chips:
 
   * Renesas ISL69223
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl69223'
 
     Addresses scanned: -
 
@@ -146,7 +146,7 @@ Supported chips:
 
   * Renesas ISL69224
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69224'
 
     Addresses scanned: -
 
@@ -156,7 +156,7 @@ Supported chips:
 
   * Renesas ISL69225
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69225'
 
     Addresses scanned: -
 
@@ -166,7 +166,7 @@ Supported chips:
 
   * Renesas ISL69227
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl69227'
 
     Addresses scanned: -
 
@@ -176,7 +176,7 @@ Supported chips:
 
   * Renesas ISL69228
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl69228'
 
     Addresses scanned: -
 
@@ -186,7 +186,7 @@ Supported chips:
 
   * Renesas ISL69234
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69234'
 
     Addresses scanned: -
 
@@ -196,7 +196,7 @@ Supported chips:
 
   * Renesas ISL69236
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69236'
 
     Addresses scanned: -
 
@@ -206,7 +206,7 @@ Supported chips:
 
   * Renesas ISL69239
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl69239'
 
     Addresses scanned: -
 
@@ -216,7 +216,7 @@ Supported chips:
 
   * Renesas ISL69242
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69242'
 
     Addresses scanned: -
 
@@ -226,7 +226,7 @@ Supported chips:
 
   * Renesas ISL69243
 
-    Prefix: 'raa_dmpvr2_1rail'
+    Prefix: 'isl69243'
 
     Addresses scanned: -
 
@@ -236,7 +236,7 @@ Supported chips:
 
   * Renesas ISL69247
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69247'
 
     Addresses scanned: -
 
@@ -246,7 +246,7 @@ Supported chips:
 
   * Renesas ISL69248
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69248'
 
     Addresses scanned: -
 
@@ -256,7 +256,7 @@ Supported chips:
 
   * Renesas ISL69254
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69254'
 
     Addresses scanned: -
 
@@ -266,7 +266,7 @@ Supported chips:
 
   * Renesas ISL69255
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69255'
 
     Addresses scanned: -
 
@@ -276,7 +276,7 @@ Supported chips:
 
   * Renesas ISL69256
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69256'
 
     Addresses scanned: -
 
@@ -286,7 +286,7 @@ Supported chips:
 
   * Renesas ISL69259
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69259'
 
     Addresses scanned: -
 
@@ -296,7 +296,7 @@ Supported chips:
 
   * Renesas ISL69260
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69260'
 
     Addresses scanned: -
 
@@ -306,7 +306,7 @@ Supported chips:
 
   * Renesas ISL69268
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69268'
 
     Addresses scanned: -
 
@@ -316,7 +316,7 @@ Supported chips:
 
   * Renesas ISL69269
 
-    Prefix: 'raa_dmpvr2_3rail'
+    Prefix: 'isl69269'
 
     Addresses scanned: -
 
@@ -326,7 +326,7 @@ Supported chips:
 
   * Renesas ISL69298
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'isl69298'
 
     Addresses scanned: -
 
@@ -336,7 +336,7 @@ Supported chips:
 
   * Renesas RAA228000
 
-    Prefix: 'raa_dmpvr2_hv'
+    Prefix: 'raa228000'
 
     Addresses scanned: -
 
@@ -346,7 +346,7 @@ Supported chips:
 
   * Renesas RAA228004
 
-    Prefix: 'raa_dmpvr2_hv'
+    Prefix: 'raa228004'
 
     Addresses scanned: -
 
@@ -356,7 +356,7 @@ Supported chips:
 
   * Renesas RAA228006
 
-    Prefix: 'raa_dmpvr2_hv'
+    Prefix: 'raa228006'
 
     Addresses scanned: -
 
@@ -366,7 +366,7 @@ Supported chips:
 
   * Renesas RAA228228
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'raa228228'
 
     Addresses scanned: -
 
@@ -376,7 +376,7 @@ Supported chips:
 
   * Renesas RAA229001
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'raa229001'
 
     Addresses scanned: -
 
@@ -386,7 +386,7 @@ Supported chips:
 
   * Renesas RAA229004
 
-    Prefix: 'raa_dmpvr2_2rail'
+    Prefix: 'raa229004'
 
     Addresses scanned: -
 
index a09971c2115cd27355542dba40c16213068448ac..fe089acb7783ea5b2079dceac16da6c7c5dc4cf3 100644 (file)
@@ -257,6 +257,8 @@ drivers:
   * :doc:`netdevsim`
   * :doc:`mlxsw`
 
+.. _Generic-Packet-Trap-Groups:
+
 Generic Packet Trap Groups
 ==========================
 
index 50133d9761c992bee565d9415d4758e66e45c028..6538ede29661e802ac6e826a688ed03e5d41ed70 100644 (file)
@@ -22,6 +22,7 @@ Contents:
    z8530book
    msg_zerocopy
    failover
+   net_dim
    net_failover
    phy
    sfp-phylink
index ee961d322d931213745e5d4abbc27d400b1420b8..6fcfd313dbe417ee7ee1098870ccb72a42a797a6 100644 (file)
@@ -812,7 +812,7 @@ tcp_limit_output_bytes - INTEGER
 tcp_challenge_ack_limit - INTEGER
        Limits number of Challenge ACK sent per second, as recommended
        in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
-       Default: 100
+       Default: 1000
 
 tcp_rx_skb_cache - BOOLEAN
        Controls a per TCP socket cache of one skb, that might help
diff --git a/Documentation/networking/net_dim.rst b/Documentation/networking/net_dim.rst
new file mode 100644 (file)
index 0000000..3bed9fd
--- /dev/null
@@ -0,0 +1,176 @@
+======================================================
+Net DIM - Generic Network Dynamic Interrupt Moderation
+======================================================
+
+:Author: Tal Gilboa <talgi@mellanox.com>
+
+.. contents:: :depth: 2
+
+Assumptions
+===========
+
+This document assumes the reader has basic knowledge in network drivers
+and in general interrupt moderation.
+
+
+Introduction
+============
+
+Dynamic Interrupt Moderation (DIM) (in networking) refers to changing the
+interrupt moderation configuration of a channel in order to optimize packet
+processing. The mechanism includes an algorithm which decides if and how to
+change moderation parameters for a channel, usually by performing an analysis on
+runtime data sampled from the system. Net DIM is such a mechanism. In each
+iteration of the algorithm, it analyses a given sample of the data, compares it
+to the previous sample and if required, it can decide to change some of the
+interrupt moderation configuration fields. The data sample is composed of data
+bandwidth, the number of packets and the number of events. The time between
+samples is also measured. Net DIM compares the current and the previous data and
+returns an adjusted interrupt moderation configuration object. In some cases,
+the algorithm might decide not to change anything. The configuration fields are
+the minimum duration (microseconds) allowed between events and the maximum
+number of wanted packets per event. The Net DIM algorithm ascribes importance to
+increase bandwidth over reducing interrupt rate.
+
+
+Net DIM Algorithm
+=================
+
+Each iteration of the Net DIM algorithm follows these steps:
+
+#. Calculates new data sample.
+#. Compares it to previous sample.
+#. Makes a decision - suggests interrupt moderation configuration fields.
+#. Applies a schedule work function, which applies suggested configuration.
+
+The first two steps are straightforward, both the new and the previous data are
+supplied by the driver registered to Net DIM. The previous data is the new data
+supplied to the previous iteration. The comparison step checks the difference
+between the new and previous data and decides on the result of the last step.
+A step would result as "better" if bandwidth increases and as "worse" if
+bandwidth reduces. If there is no change in bandwidth, the packet rate is
+compared in a similar fashion - increase == "better" and decrease == "worse".
+In case there is no change in the packet rate as well, the interrupt rate is
+compared. Here the algorithm tries to optimize for lower interrupt rate so an
+increase in the interrupt rate is considered "worse" and a decrease is
+considered "better". Step #2 has an optimization for avoiding false results: it
+only considers a difference between samples as valid if it is greater than a
+certain percentage. Also, since Net DIM does not measure anything by itself, it
+assumes the data provided by the driver is valid.
+
+Step #3 decides on the suggested configuration based on the result from step #2
+and the internal state of the algorithm. The states reflect the "direction" of
+the algorithm: is it going left (reducing moderation), right (increasing
+moderation) or standing still. Another optimization is that if a decision
+to stay still is made multiple times, the interval between iterations of the
+algorithm would increase in order to reduce calculation overhead. Also, after
+"parking" on one of the most left or most right decisions, the algorithm may
+decide to verify this decision by taking a step in the other direction. This is
+done in order to avoid getting stuck in a "deep sleep" scenario. Once a
+decision is made, an interrupt moderation configuration is selected from
+the predefined profiles.
+
+The last step is to notify the registered driver that it should apply the
+suggested configuration. This is done by scheduling a work function, defined by
+the Net DIM API and provided by the registered driver.
+
+As you can see, Net DIM itself does not actively interact with the system. It
+would have trouble making the correct decisions if the wrong data is supplied to
+it and it would be useless if the work function would not apply the suggested
+configuration. This does, however, allow the registered driver some room for
+manoeuvre as it may provide partial data or ignore the algorithm suggestion
+under some conditions.
+
+
+Registering a Network Device to DIM
+===================================
+
+Net DIM API exposes the main function net_dim().
+This function is the entry point to the Net
+DIM algorithm and has to be called every time the driver would like to check if
+it should change interrupt moderation parameters. The driver should provide two
+data structures: :c:type:`struct dim <dim>` and
+:c:type:`struct dim_sample <dim_sample>`. :c:type:`struct dim <dim>`
+describes the state of DIM for a specific object (RX queue, TX queue,
+other queues, etc.). This includes the current selected profile, previous data
+samples, the callback function provided by the driver and more.
+:c:type:`struct dim_sample <dim_sample>` describes a data sample,
+which will be compared to the data sample stored in :c:type:`struct dim <dim>`
+in order to decide on the algorithm's next
+step. The sample should include bytes, packets and interrupts, measured by
+the driver.
+
+In order to use Net DIM from a networking driver, the driver needs to call the
+main net_dim() function. The recommended method is to call net_dim() on each
+interrupt. Since Net DIM has a built-in moderation and it might decide to skip
+iterations under certain conditions, there is no need to moderate the net_dim()
+calls as well. As mentioned above, the driver needs to provide an object of type
+:c:type:`struct dim <dim>` to the net_dim() function call. It is advised for
+each entity using Net DIM to hold a :c:type:`struct dim <dim>` as part of its
+data structure and use it as the main Net DIM API object.
+The :c:type:`struct dim_sample <dim_sample>` should hold the latest
+bytes, packets and interrupts count. No need to perform any calculations, just
+include the raw data.
+
+The net_dim() call itself does not return anything. Instead Net DIM relies on
+the driver to provide a callback function, which is called when the algorithm
+decides to make a change in the interrupt moderation parameters. This callback
+will be scheduled and run in a separate thread in order not to add overhead to
+the data flow. After the work is done, Net DIM algorithm needs to be set to
+the proper state in order to move to the next iteration.
+
+
+Example
+=======
+
+The following code demonstrates how to register a driver to Net DIM. The actual
+usage is not complete but it should make the outline of the usage clear.
+
+.. code-block:: c
+
+  #include <linux/dim.h>
+
+  /* Callback for net DIM to schedule on a decision to change moderation */
+  void my_driver_do_dim_work(struct work_struct *work)
+  {
+       /* Get struct dim from struct work_struct */
+       struct dim *dim = container_of(work, struct dim,
+                                      work);
+       /* Do interrupt moderation related stuff */
+       ...
+
+       /* Signal net DIM work is done and it should move to next iteration */
+       dim->state = DIM_START_MEASURE;
+  }
+
+  /* My driver's interrupt handler */
+  int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
+  {
+       ...
+       /* A struct to hold current measured data */
+       struct dim_sample dim_sample;
+       ...
+       /* Initiate data sample struct with current data */
+       dim_update_sample(my_entity->events,
+                         my_entity->packets,
+                         my_entity->bytes,
+                         &dim_sample);
+       /* Call net DIM */
+       net_dim(&my_entity->dim, dim_sample);
+       ...
+  }
+
+  /* My entity's initialization function (my_entity was already allocated) */
+  int my_driver_init_my_entity(struct my_driver_entity *my_entity, ...)
+  {
+       ...
+       /* Initiate struct work_struct with my driver's callback function */
+       INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
+       ...
+  }
+
+Dynamic Interrupt Moderation (DIM) library API
+==============================================
+
+.. kernel-doc:: include/linux/dim.h
+    :internal:
diff --git a/Documentation/networking/net_dim.txt b/Documentation/networking/net_dim.txt
deleted file mode 100644 (file)
index 9bdb7d5..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-Net DIM - Generic Network Dynamic Interrupt Moderation
-======================================================
-
-Author:
-       Tal Gilboa <talgi@mellanox.com>
-
-
-Contents
-=========
-
-- Assumptions
-- Introduction
-- The Net DIM Algorithm
-- Registering a Network Device to DIM
-- Example
-
-Part 0: Assumptions
-======================
-
-This document assumes the reader has basic knowledge in network drivers
-and in general interrupt moderation.
-
-
-Part I: Introduction
-======================
-
-Dynamic Interrupt Moderation (DIM) (in networking) refers to changing the
-interrupt moderation configuration of a channel in order to optimize packet
-processing. The mechanism includes an algorithm which decides if and how to
-change moderation parameters for a channel, usually by performing an analysis on
-runtime data sampled from the system. Net DIM is such a mechanism. In each
-iteration of the algorithm, it analyses a given sample of the data, compares it
-to the previous sample and if required, it can decide to change some of the
-interrupt moderation configuration fields. The data sample is composed of data
-bandwidth, the number of packets and the number of events. The time between
-samples is also measured. Net DIM compares the current and the previous data and
-returns an adjusted interrupt moderation configuration object. In some cases,
-the algorithm might decide not to change anything. The configuration fields are
-the minimum duration (microseconds) allowed between events and the maximum
-number of wanted packets per event. The Net DIM algorithm ascribes importance to
-increase bandwidth over reducing interrupt rate.
-
-
-Part II: The Net DIM Algorithm
-===============================
-
-Each iteration of the Net DIM algorithm follows these steps:
-1. Calculates new data sample.
-2. Compares it to previous sample.
-3. Makes a decision - suggests interrupt moderation configuration fields.
-4. Applies a schedule work function, which applies suggested configuration.
-
-The first two steps are straightforward, both the new and the previous data are
-supplied by the driver registered to Net DIM. The previous data is the new data
-supplied to the previous iteration. The comparison step checks the difference
-between the new and previous data and decides on the result of the last step.
-A step would result as "better" if bandwidth increases and as "worse" if
-bandwidth reduces. If there is no change in bandwidth, the packet rate is
-compared in a similar fashion - increase == "better" and decrease == "worse".
-In case there is no change in the packet rate as well, the interrupt rate is
-compared. Here the algorithm tries to optimize for lower interrupt rate so an
-increase in the interrupt rate is considered "worse" and a decrease is
-considered "better". Step #2 has an optimization for avoiding false results: it
-only considers a difference between samples as valid if it is greater than a
-certain percentage. Also, since Net DIM does not measure anything by itself, it
-assumes the data provided by the driver is valid.
-
-Step #3 decides on the suggested configuration based on the result from step #2
-and the internal state of the algorithm. The states reflect the "direction" of
-the algorithm: is it going left (reducing moderation), right (increasing
-moderation) or standing still. Another optimization is that if a decision
-to stay still is made multiple times, the interval between iterations of the
-algorithm would increase in order to reduce calculation overhead. Also, after
-"parking" on one of the most left or most right decisions, the algorithm may
-decide to verify this decision by taking a step in the other direction. This is
-done in order to avoid getting stuck in a "deep sleep" scenario. Once a
-decision is made, an interrupt moderation configuration is selected from
-the predefined profiles.
-
-The last step is to notify the registered driver that it should apply the
-suggested configuration. This is done by scheduling a work function, defined by
-the Net DIM API and provided by the registered driver.
-
-As you can see, Net DIM itself does not actively interact with the system. It
-would have trouble making the correct decisions if the wrong data is supplied to
-it and it would be useless if the work function would not apply the suggested
-configuration. This does, however, allow the registered driver some room for
-manoeuvre as it may provide partial data or ignore the algorithm suggestion
-under some conditions.
-
-
-Part III: Registering a Network Device to DIM
-==============================================
-
-Net DIM API exposes the main function net_dim(struct dim *dim,
-struct dim_sample end_sample). This function is the entry point to the Net
-DIM algorithm and has to be called every time the driver would like to check if
-it should change interrupt moderation parameters. The driver should provide two
-data structures: struct dim and struct dim_sample. Struct dim
-describes the state of DIM for a specific object (RX queue, TX queue,
-other queues, etc.). This includes the current selected profile, previous data
-samples, the callback function provided by the driver and more.
-Struct dim_sample describes a data sample, which will be compared to the
-data sample stored in struct dim in order to decide on the algorithm's next
-step. The sample should include bytes, packets and interrupts, measured by
-the driver.
-
-In order to use Net DIM from a networking driver, the driver needs to call the
-main net_dim() function. The recommended method is to call net_dim() on each
-interrupt. Since Net DIM has a built-in moderation and it might decide to skip
-iterations under certain conditions, there is no need to moderate the net_dim()
-calls as well. As mentioned above, the driver needs to provide an object of type
-struct dim to the net_dim() function call. It is advised for each entity
-using Net DIM to hold a struct dim as part of its data structure and use it
-as the main Net DIM API object. The struct dim_sample should hold the latest
-bytes, packets and interrupts count. No need to perform any calculations, just
-include the raw data.
-
-The net_dim() call itself does not return anything. Instead Net DIM relies on
-the driver to provide a callback function, which is called when the algorithm
-decides to make a change in the interrupt moderation parameters. This callback
-will be scheduled and run in a separate thread in order not to add overhead to
-the data flow. After the work is done, Net DIM algorithm needs to be set to
-the proper state in order to move to the next iteration.
-
-
-Part IV: Example
-=================
-
-The following code demonstrates how to register a driver to Net DIM. The actual
-usage is not complete but it should make the outline of the usage clear.
-
-my_driver.c:
-
-#include <linux/dim.h>
-
-/* Callback for net DIM to schedule on a decision to change moderation */
-void my_driver_do_dim_work(struct work_struct *work)
-{
-       /* Get struct dim from struct work_struct */
-       struct dim *dim = container_of(work, struct dim,
-                                      work);
-       /* Do interrupt moderation related stuff */
-       ...
-
-       /* Signal net DIM work is done and it should move to next iteration */
-       dim->state = DIM_START_MEASURE;
-}
-
-/* My driver's interrupt handler */
-int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
-{
-       ...
-       /* A struct to hold current measured data */
-       struct dim_sample dim_sample;
-       ...
-       /* Initiate data sample struct with current data */
-       dim_update_sample(my_entity->events,
-                         my_entity->packets,
-                         my_entity->bytes,
-                         &dim_sample);
-       /* Call net DIM */
-       net_dim(&my_entity->dim, dim_sample);
-       ...
-}
-
-/* My entity's initialization function (my_entity was already allocated) */
-int my_driver_init_my_entity(struct my_driver_entity *my_entity, ...)
-{
-       ...
-       /* Initiate struct work_struct with my driver's callback function */
-       INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
-       ...
-}
index fa7ddc0428c871cb57cfcce50960f0f1af639631..5325c71ca877a66615bae86efd34bad3025fce08 100644 (file)
@@ -1399,8 +1399,8 @@ must have read/write permission; CS must be __BOOT_CS and DS, ES, SS
 must be __BOOT_DS; interrupt must be disabled; %rsi must hold the base
 address of the struct boot_params.
 
-EFI Handover Protocol
-=====================
+EFI Handover Protocol (deprecated)
+==================================
 
 This protocol allows boot loaders to defer initialisation to the EFI
 boot stub. The boot loader is required to load the kernel/initrd(s)
@@ -1408,6 +1408,12 @@ from the boot media and jump to the EFI handover protocol entry point
 which is hdr->handover_offset bytes from the beginning of
 startup_{32,64}.
 
+The boot loader MUST respect the kernel's PE/COFF metadata when it comes
+to section alignment, the memory footprint of the executable image beyond
+the size of the file itself, and any other aspect of the PE/COFF header
+that may affect correct operation of the image as a PE/COFF binary in the
+execution context provided by the EFI firmware.
+
 The function prototype for the handover entry point looks like this::
 
     efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
@@ -1419,9 +1425,18 @@ UEFI specification. 'bp' is the boot loader-allocated boot params.
 
 The boot loader *must* fill out the following fields in bp::
 
-  - hdr.code32_start
   - hdr.cmd_line_ptr
   - hdr.ramdisk_image (if applicable)
   - hdr.ramdisk_size  (if applicable)
 
 All other fields should be zero.
+
+NOTE: The EFI Handover Protocol is deprecated in favour of the ordinary PE/COFF
+      entry point, combined with the LINUX_EFI_INITRD_MEDIA_GUID based initrd
+      loading protocol (refer to [0] for an example of the bootloader side of
+      this), which removes the need for any knowledge on the part of the EFI
+      bootloader regarding the internal representation of boot_params or any
+      requirements/limitations regarding the placement of the command line
+      and ramdisk in memory, or the placement of the kernel image itself.
+
+[0] https://github.com/u-boot/u-boot/commit/ec80b4735a593961fe701cc3a5d717d4739b0fd0
index e64e5db314976dc9f73107c9bd616aeaa3dd9c70..f437f42b73ad74e6b59f5feaa088e65243e10770 100644 (file)
@@ -1323,7 +1323,10 @@ ARM INTEGRATOR, VERSATILE AND REALVIEW SUPPORT
 M:     Linus Walleij <linus.walleij@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/arm/arm-boards
+F:     Documentation/devicetree/bindings/arm/arm,integrator.yaml
+F:     Documentation/devicetree/bindings/arm/arm,realview.yaml
+F:     Documentation/devicetree/bindings/arm/arm,versatile.yaml
+F:     Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
 F:     Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
 F:     Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
 F:     Documentation/devicetree/bindings/i2c/i2c-versatile.txt
@@ -5042,7 +5045,7 @@ F:        drivers/dma-buf/
 F:     include/linux/*fence.h
 F:     include/linux/dma-buf*
 F:     include/linux/dma-resv.h
-K:     dma_(buf|fence|resv)
+K:     \bdma_(?:buf|fence|resv)\b
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vkoul@kernel.org>
@@ -5253,7 +5256,7 @@ DRM DRIVER FOR ARM VERSATILE TFT PANELS
 M:     Linus Walleij <linus.walleij@linaro.org>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt
+F:     Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
 F:     drivers/gpu/drm/panel/panel-arm-versatile.c
 
 DRM DRIVER FOR ASPEED BMC GFX
@@ -5279,7 +5282,7 @@ F:        drivers/gpu/drm/bochs/
 DRM DRIVER FOR BOE HIMAX8279D PANELS
 M:     Jerry Han <hanxu5@huaqin.corp-partner.google.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
+F:     Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
 F:     drivers/gpu/drm/panel/panel-boe-himax8279d.c
 
 DRM DRIVER FOR FARADAY TVE200 TV ENCODER
@@ -5297,7 +5300,7 @@ F:        drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
 DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
 M:     Jagan Teki <jagan@amarulasolutions.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
+F:     Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
 F:     drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
 
 DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
@@ -5332,6 +5335,14 @@ S:       Orphan / Obsolete
 F:     drivers/gpu/drm/i810/
 F:     include/uapi/drm/i810_drm.h
 
+DRM DRIVER FOR LVDS PANELS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+S:     Maintained
+F:     drivers/gpu/drm/panel/panel-lvds.c
+F:     Documentation/devicetree/bindings/display/panel/lvds.yaml
+
 DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
 S:     Orphan / Obsolete
 F:     drivers/gpu/drm/mga/
@@ -5380,7 +5391,7 @@ F:        include/uapi/drm/nouveau_drm.h
 DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
 M:     Stefan Mavrodiev <stefan@olimex.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
+F:     Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
 F:     drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
 
 DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
@@ -5397,7 +5408,7 @@ L:        virtualization@lists.linux-foundation.org
 S:     Obsolete
 W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     drivers/gpu/drm/cirrus/
+F:     drivers/gpu/drm/tiny/cirrus.c
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
@@ -5447,7 +5458,7 @@ F:        drivers/gpu/drm/tiny/st7586.c
 DRM DRIVER FOR SITRONIX ST7701 PANELS
 M:     Jagan Teki <jagan@amarulasolutions.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
+F:     Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
 F:     drivers/gpu/drm/panel/panel-sitronix-st7701.c
 
 DRM DRIVER FOR SITRONIX ST7735R PANELS
@@ -5552,7 +5563,7 @@ M:        Chen-Yu Tsai <wens@csie.org>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+F:     Documentation/devicetree/bindings/display/allwinner*
 F:     drivers/gpu/drm/sun4i/
 
 DRM DRIVERS FOR AMLOGIC SOCS
@@ -5934,6 +5945,7 @@ M:        Tal Gilboa <talgi@mellanox.com>
 S:     Maintained
 F:     include/linux/dim.h
 F:     lib/dim/
+F:     Documentation/networking/net_dim.rst
 
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
@@ -13853,7 +13865,8 @@ S:      Maintained
 F:     drivers/scsi/qla1280.[ch]
 
 QLOGIC QLA2XXX FC-SCSI DRIVER
-M:     hmadhani@marvell.com
+M:     Nilesh Javali <njavali@marvell.com>
+M:     GR-QLogic-Storage-Upstream@marvell.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/scsi/LICENSE.qla2xxx
@@ -14058,7 +14071,6 @@ F:      drivers/net/wireless/quantenna
 RADEON and AMDGPU DRM DRIVERS
 M:     Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
-M:     David (ChunMing) Zhou <David1.Zhou@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git git://people.freedesktop.org/~agd5f/linux
index 70def4907036c7c0adfd73fc863ec972cc078e32..49b2709ff44e3fc204ca21777bec4ed93956b707 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index cabdd8f4a2482e2b2362794b3071173c87431b3a..e8e1c866e413a287924656851a202f1c480613b7 100644 (file)
@@ -1450,7 +1450,8 @@ ENTRY(efi_enter_kernel)
                @ running beyond the PoU, and so calling cache_off below from
                @ inside the PE/COFF loader allocated region is unsafe unless
                @ we explicitly clean it to the PoC.
-               adr     r0, call_cache_fn               @ region of code we will
+ ARM(          adrl    r0, call_cache_fn       )
+ THUMB(                adr     r0, call_cache_fn       )       @ region of code we will
                adr     r1, 0f                          @ run with MMU off
                bl      cache_clean_flush
                bl      cache_off
index 47982889d7747a6e37f0e92db187ac7b77efa66a..98da446aa0f27ceb994264f684004d2cc39bcaef 100644 (file)
                                compatible = "fsl,imx6q-fec";
                                reg = <0x02188000 0x4000>;
                                interrupt-names = "int0", "pps";
-                               interrupts-extended =
-                                       <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
-                                       <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
+                                            <0 119 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6QDL_CLK_ENET>,
                                         <&clks IMX6QDL_CLK_ENET>,
                                         <&clks IMX6QDL_CLK_ENET_REF>;
                                clock-names = "ipg", "ahb", "ptp";
+                               gpr = <&gpr>;
                                status = "disabled";
                        };
 
index 93b89dc1f53b5b20c1044ceead90f821b112c2fb..b310f13a53f22eb6c59ab2d3ca21b2803b5d7921 100644 (file)
@@ -77,7 +77,6 @@
 };
 
 &fec {
-       /delete-property/interrupts-extended;
        interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
                     <0 119 IRQ_TYPE_LEVEL_HIGH>;
 };
index 5bc82e2671c6e69944d11f8d55446873e442a274..351f891b4842e4d0453b047adb8d98995f2a7081 100644 (file)
@@ -104,6 +104,14 @@ static struct fixed_voltage_config shannon_cf_vcc_pdata __initdata = {
        .enabled_at_boot = 1,
 };
 
+static struct gpiod_lookup_table shannon_display_gpio_table = {
+       .dev_id = "sa11x0-fb",
+       .table = {
+               GPIO_LOOKUP("gpio", 22, "shannon-lcden", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init shannon_init(void)
 {
        sa11x0_register_fixed_regulator(0, &shannon_cf_vcc_pdata,
@@ -113,6 +121,7 @@ static void __init shannon_init(void)
        sa11x0_register_pcmcia(0, &shannon_pcmcia0_gpio_table);
        sa11x0_register_pcmcia(1, &shannon_pcmcia1_gpio_table);
        sa11x0_ppc_configure_mcp();
+       gpiod_add_lookup_table(&shannon_display_gpio_table);
        sa11x0_register_lcd(&shannon_lcd_info);
        sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1);
        sa11x0_register_mcp(&shannon_mcp_data);
index cc29869d12a3ecb7bbb1fd4212355963b654952b..bf85d6db493158499f172c3e2730c8b2d339ec2f 100644 (file)
@@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[],
        rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do LSR operation */
-       if (val < 32) {
+       if (val == 0) {
+               /* An immediate value of 0 encodes a shift amount of 32
+                * for LSR. To shift by 0, don't do anything.
+                */
+       } else if (val < 32) {
                emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
                emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
                emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
@@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[],
        rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do ARSH operation */
-       if (val < 32) {
+       if (val == 0) {
+               /* An immediate value of 0 encodes a shift amount of 32
+                * for ASR. To shift by 0, don't do anything.
+                */
+       } else if (val < 32) {
                emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
                emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
                emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
@@ -992,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
        arm_bpf_put_reg32(dst_hi, rd[0], ctx);
 }
 
+static bool is_ldst_imm(s16 off, const u8 size)
+{
+       s16 off_max = 0;
+
+       switch (size) {
+       case BPF_B:
+       case BPF_W:
+               off_max = 0xfff;
+               break;
+       case BPF_H:
+               off_max = 0xff;
+               break;
+       case BPF_DW:
+               /* Need to make sure off+4 does not overflow. */
+               off_max = 0xfff - 4;
+               break;
+       }
+       return -off_max <= off && off <= off_max;
+}
+
 /* *(size *)(dst + off) = src */
 static inline void emit_str_r(const s8 dst, const s8 src[],
-                             s32 off, struct jit_ctx *ctx, const u8 sz){
+                             s16 off, struct jit_ctx *ctx, const u8 sz){
        const s8 *tmp = bpf2a32[TMP_REG_1];
-       s32 off_max;
        s8 rd;
 
        rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
 
-       if (sz == BPF_H)
-               off_max = 0xff;
-       else
-               off_max = 0xfff;
-
-       if (off < 0 || off > off_max) {
+       if (!is_ldst_imm(off, sz)) {
                emit_a32_mov_i(tmp[0], off, ctx);
                emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
                rd = tmp[0];
@@ -1035,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
 
 /* dst = *(size*)(src + off) */
 static inline void emit_ldx_r(const s8 dst[], const s8 src,
-                             s32 off, struct jit_ctx *ctx, const u8 sz){
+                             s16 off, struct jit_ctx *ctx, const u8 sz){
        const s8 *tmp = bpf2a32[TMP_REG_1];
        const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
        s8 rm = src;
-       s32 off_max;
-
-       if (sz == BPF_H)
-               off_max = 0xff;
-       else
-               off_max = 0xfff;
 
-       if (off < 0 || off > off_max) {
+       if (!is_ldst_imm(off, sz)) {
                emit_a32_mov_i(tmp[0], off, ctx);
                emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
                rm = tmp[0];
index dd6804a64f1a041935e80919678be6c8f22594e6..fd4e1ce1daf96ae57aa70c84305781325252ec94 100644 (file)
@@ -36,7 +36,7 @@
 
 #include <linux/mm.h>
 
-struct start_info _xen_start_info;
+static struct start_info _xen_start_info;
 struct start_info *xen_start_info = &_xen_start_info;
 EXPORT_SYMBOL(xen_start_info);
 
index ebc62243283186169d504a05982fe4797f3ead9c..c4ac0ac25a00809bb36d45adcfcd00e5ff369eca 100644 (file)
@@ -49,7 +49,9 @@
 #ifndef CONFIG_BROKEN_GAS_INST
 
 #ifdef __ASSEMBLY__
-#define __emit_inst(x)                 .inst (x)
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x)                 .inst(x)
 #else
 #define __emit_inst(x)                 ".inst " __stringify((x)) "\n\t"
 #endif
index 354b11e27c07af0ea52787ad6dfcde6f9e3aa9fa..033a48f30dbb801a58dfe64998fe102eb663aecc 100644 (file)
@@ -260,18 +260,7 @@ static int __aarch32_alloc_vdso_pages(void)
        if (ret)
                return ret;
 
-       ret = aarch32_alloc_kuser_vdso_page();
-       if (ret) {
-               unsigned long c_vvar =
-                       (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
-               unsigned long c_vdso =
-                       (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
-
-               free_page(c_vvar);
-               free_page(c_vdso);
-       }
-
-       return ret;
+       return aarch32_alloc_kuser_vdso_page();
 }
 #else
 static int __aarch32_alloc_vdso_pages(void)
index a0765aa60ea9aa8dc069387aaece1b3c6a1a24fa..1bff55aa2d54e2ce8dd312da3c7f8d426a78540f 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table.h
 generic-y += extable.h
-generic-y += hardirq.h
 generic-y += kvm_para.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index a197258595ef6e025429182e8529b5f8084f05c8..62f7bfeb709eb1cdabaccce261320dd0191b3c48 100644 (file)
@@ -55,7 +55,7 @@ config RISCV
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MMIOWB
        select ARCH_HAS_DEBUG_VIRTUAL
-       select HAVE_EBPF_JIT
+       select HAVE_EBPF_JIT if MMU
        select EDAC_SUPPORT
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_SET_DIRECT_MAP
index cc1985d8750a1fd7ba12825553a0cfc60bfc91b9..d208a9fd6c528e4599cfc6aecf12151f03372e87 100644 (file)
@@ -110,6 +110,16 @@ static bool is_32b_int(s64 val)
        return -(1L << 31) <= val && val < (1L << 31);
 }
 
+static bool in_auipc_jalr_range(s64 val)
+{
+       /*
+        * auipc+jalr can reach any signed PC-relative offset in the range
+        * [-2^31 - 2^11, 2^31 - 2^11).
+        */
+       return (-(1L << 31) - (1L << 11)) <= val &&
+               val < ((1L << 31) - (1L << 11));
+}
+
 static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 {
        /* Note that the immediate from the add is sign-extended,
@@ -380,20 +390,24 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
        *rd = RV_REG_T2;
 }
 
-static void emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
-                              struct rv_jit_context *ctx)
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+                             struct rv_jit_context *ctx)
 {
        s64 upper, lower;
 
        if (rvoff && is_21b_int(rvoff) && !force_jalr) {
                emit(rv_jal(rd, rvoff >> 1), ctx);
-               return;
+               return 0;
+       } else if (in_auipc_jalr_range(rvoff)) {
+               upper = (rvoff + (1 << 11)) >> 12;
+               lower = rvoff & 0xfff;
+               emit(rv_auipc(RV_REG_T1, upper), ctx);
+               emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+               return 0;
        }
 
-       upper = (rvoff + (1 << 11)) >> 12;
-       lower = rvoff & 0xfff;
-       emit(rv_auipc(RV_REG_T1, upper), ctx);
-       emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+       pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
+       return -ERANGE;
 }
 
 static bool is_signed_bpf_cond(u8 cond)
@@ -407,18 +421,16 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
        s64 off = 0;
        u64 ip;
        u8 rd;
+       int ret;
 
        if (addr && ctx->insns) {
                ip = (u64)(long)(ctx->insns + ctx->ninsns);
                off = addr - ip;
-               if (!is_32b_int(off)) {
-                       pr_err("bpf-jit: target call addr %pK is out of range\n",
-                              (void *)addr);
-                       return -ERANGE;
-               }
        }
 
-       emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
+       ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
+       if (ret)
+               return ret;
        rd = bpf_to_rv_reg(BPF_REG_0, ctx);
        emit(rv_addi(rd, RV_REG_A0, 0), ctx);
        return 0;
@@ -429,7 +441,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 {
        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
                    BPF_CLASS(insn->code) == BPF_JMP;
-       int s, e, rvoff, i = insn - ctx->prog->insnsi;
+       int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
        struct bpf_prog_aux *aux = ctx->prog->aux;
        u8 rd = -1, rs = -1, code = insn->code;
        s16 off = insn->off;
@@ -699,7 +711,9 @@ out_be:
        /* JUMP off */
        case BPF_JMP | BPF_JA:
                rvoff = rv_offset(i, off, ctx);
-               emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               if (ret)
+                       return ret;
                break;
 
        /* IF (dst COND src) JUMP off */
@@ -801,7 +815,6 @@ out_be:
        case BPF_JMP | BPF_CALL:
        {
                bool fixed;
-               int ret;
                u64 addr;
 
                mark_call(ctx);
@@ -826,7 +839,9 @@ out_be:
                        break;
 
                rvoff = epilogue_offset(ctx);
-               emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               if (ret)
+                       return ret;
                break;
 
        /* dst = imm64 */
index b0da5320bcff8420f00037468a17e61e353b5f49..624f5d9b0f79f9c37e2b6e9fad21a6ee31ea7e3f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mm.h>
 #include <linux/hyperv.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 #include <linux/cpuhotplug.h>
 #include <linux/syscore_ops.h>
 #include <clocksource/hyperv_timer.h>
@@ -419,11 +420,14 @@ void hyperv_cleanup(void)
 }
 EXPORT_SYMBOL_GPL(hyperv_cleanup);
 
-void hyperv_report_panic(struct pt_regs *regs, long err)
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
 {
        static bool panic_reported;
        u64 guest_id;
 
+       if (in_die && !panic_on_oops)
+               return;
+
        /*
         * We prefer to report panic on 'die' chain as we have proper
         * registers to report, but if we miss it (e.g. on BUG()) we need
index cdcf48d52a128f7f647f4ee9d5a62d453394f5fc..8391c115c0ecd32da2c9ad856bdb28caf3319143 100644 (file)
@@ -178,8 +178,10 @@ extern void efi_free_boot_services(void);
 extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
 extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
 
+/* kexec external ABI */
 struct efi_setup_data {
        u64 fw_vendor;
+       u64 __unused;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
index 6685e1218959d21b2c93ceb56992ef291e427ac0..7063b5a43220a96a363f97e3567fabd0fbc8be70 100644 (file)
@@ -41,7 +41,7 @@ struct microcode_amd {
        unsigned int                    mpb[0];
 };
 
-#define PATCH_MAX_SIZE PAGE_SIZE
+#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
 
 #ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
index bf08d4508ecbafd3e5c355a581fd3cd2a1a89d4b..a19a680542ce736ce0f89c3621d0695a9481c6f8 100644 (file)
@@ -1119,35 +1119,53 @@ void switch_to_sld(unsigned long tifn)
        sld_update_msr(!(tifn & _TIF_SLD));
 }
 
-#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}
-
 /*
- * The following processors have the split lock detection feature. But
- * since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot
- * be enumerated. Enable it by family and model matching on these
- * processors.
+ * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
+ * only be trusted if it is confirmed that a CPU model implements a
+ * specific feature at a particular bit position.
+ *
+ * The possible driver data field values:
+ *
+ * - 0: CPU models that are known to have the per-core split-lock detection
+ *     feature even though they do not enumerate IA32_CORE_CAPABILITIES.
+ *
+ * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
+ *      bit 5 to enumerate the per-core split-lock detection feature.
  */
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
-       SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X),
-       SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L),
+       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           0),
+       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,           0),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        1),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      1),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      1),
        {}
 };
 
 void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
 {
-       u64 ia32_core_caps = 0;
+       const struct x86_cpu_id *m;
+       u64 ia32_core_caps;
+
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return;
 
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       m = x86_match_cpu(split_lock_cpu_ids);
+       if (!m)
                return;
-       if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) {
-               /* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */
+
+       switch (m->driver_data) {
+       case 0:
+               break;
+       case 1:
+               if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
+                       return;
                rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
-       } else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
-               /* Enumerate split lock detection by family and model. */
-               if (x86_match_cpu(split_lock_cpu_ids))
-                       ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT;
+               if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
+                       return;
+               break;
+       default:
+               return;
        }
 
-       if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
-               split_lock_setup();
+       split_lock_setup();
 }
index caa032ce3fe311507e6168f4ec0a0d742cf7a0b9..ebf34c7bc8bc0dfcb0cac217d522358c8c192b04 100644 (file)
@@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void)
        ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
        ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-       pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
-               ms_hyperv.features, ms_hyperv.hints);
+       pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
+               ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
 
        ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
        ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
@@ -263,6 +263,16 @@ static void __init ms_hyperv_init_platform(void)
                        cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
        }
 
+       /*
+        * Hyper-V expects to get crash register data or kmsg when
+        * crash enlightment is available and system crashes. Set
+        * crash_kexec_post_notifiers to be true to make sure that
+        * calling crash enlightment interface before running kdump
+        * kernel.
+        */
+       if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
+               crash_kexec_post_notifiers = true;
+
 #ifdef CONFIG_X86_LOCAL_APIC
        if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
            ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
index 89049b343c7a8f27b0af635253c911d15f393f2b..d8cc5223b7ce89e8dc3703bc714b0fe5d29d1d8e 100644 (file)
@@ -578,6 +578,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);
 
+       rdt_domain_reconfigure_cdp(r);
+
        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
                kfree(d);
                return;
index 181c992f448c068b4cabf8880eb9cf20716475d6..3dd13f3a8b23187c2b3e0257873c434960b33bae 100644 (file)
@@ -601,5 +601,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
 bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
 bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
index 064e9ef44cd68ec5eaefc13ccde103c5fe89f565..5a359d9fcc055701956d36a4857f15853702e1c6 100644 (file)
@@ -1859,6 +1859,19 @@ static int set_cache_qos_cfg(int level, bool enable)
        return 0;
 }
 
+/* Restore the qos cfg state when a domain comes online */
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
+{
+       if (!r->alloc_capable)
+               return;
+
+       if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
+               l2_qos_cfg_update(&r->alloc_enabled);
+
+       if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
+               l3_qos_cfg_update(&r->alloc_enabled);
+}
+
 /*
  * Enable or disable the MBA software controller
  * which helps user specify bandwidth in MBps.
@@ -3072,7 +3085,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
         * If the rdtgroup is a mon group and parent directory
         * is a valid "mon_groups" directory, remove the mon group.
         */
-       if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+       if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
+           rdtgrp != &rdtgroup_default) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        ret = rdtgroup_ctrl_remove(kn, rdtgrp);
index 4d732a444711fe3ab3abcc6ed01f926918b5a6ff..8d5cbe1bbb3bc0dc674ad868dea0623fe5ae55a8 100644 (file)
@@ -81,7 +81,7 @@
 #define        UMIP_INST_SLDT  3       /* 0F 00 /0 */
 #define        UMIP_INST_STR   4       /* 0F 00 /1 */
 
-const char * const umip_insns[5] = {
+static const char * const umip_insns[5] = {
        [UMIP_INST_SGDT] = "SGDT",
        [UMIP_INST_SIDT] = "SIDT",
        [UMIP_INST_SMSW] = "SMSW",
index 211bb9358b733e84ca9fe65b1b7be6670db892a9..c5e393f8bb3f6363f47d94f42465155a2e333179 100644 (file)
@@ -202,7 +202,7 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       unsigned long pfn, text, pf;
+       unsigned long pfn, text, pf, rodata;
        struct page *page;
        unsigned npages;
        pgd_t *pgd = efi_mm.pgd;
@@ -256,7 +256,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
 
-       npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT;
+       npages = (_etext - _text) >> PAGE_SHIFT;
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;
 
@@ -266,6 +266,14 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
                return 1;
        }
 
+       npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
+       rodata = __pa(__start_rodata);
+       pfn = rodata >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
+               pr_err("Failed to map kernel rodata 1:1\n");
+               return 1;
+       }
+
        return 0;
 }
 
@@ -638,7 +646,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
        phys_vendor = virt_to_phys_or_null(vnd);
        phys_data = virt_to_phys_or_null_size(data, data_size);
 
-       if (!phys_name || !phys_data)
+       if (!phys_name || (data && !phys_data))
                status = EFI_INVALID_PARAMETER;
        else
                status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -669,7 +677,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
        phys_vendor = virt_to_phys_or_null(vnd);
        phys_data = virt_to_phys_or_null_size(data, data_size);
 
-       if (!phys_name || !phys_data)
+       if (!phys_name || (data && !phys_data))
                status = EFI_INVALID_PARAMETER;
        else
                status = efi_thunk(set_variable, phys_name, phys_vendor,
index 8e56884fd2e92643f1f7ecc32380ea245740c610..a7785df2c94462b9fb9d18faa2fcecb8c4fd8dfd 100644 (file)
@@ -1222,8 +1222,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                rq = list_first_entry(list, struct request, queuelist);
 
                hctx = rq->mq_hctx;
-               if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+               if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+                       blk_mq_put_driver_tag(rq);
                        break;
+               }
 
                if (!blk_mq_get_driver_tag(rq)) {
                        /*
index 8641ba9793c5fb0831eebb5743935771b38da5ac..9cb082f38b936e8909735558e0a8512ba331b684 100644 (file)
@@ -313,7 +313,7 @@ static void scale_up(struct rq_wb *rwb)
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
-       rwb_trace_step(rwb, "scale up");
+       rwb_trace_step(rwb, tracepoint_string("scale up"));
 }
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
@@ -322,7 +322,7 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
-       rwb_trace_step(rwb, "scale down");
+       rwb_trace_step(rwb, tracepoint_string("scale down"));
 }
 
 static void rwb_arm_timer(struct rq_wb *rwb)
index 0101b65250cb2d86e021b36eaf75c861f4de8a29..0c0a736eb8613d16e9ef597c7ce179d2595bd169 100644 (file)
@@ -410,6 +410,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
        { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
        { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
        { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
index 1e0a6b19ae0dde59e29328b63f331bd0d3c507f3..67d65ac785e9dcc585dd769503b13dceaaf116a9 100644 (file)
@@ -3754,11 +3754,7 @@ static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
                               enum rbd_notify_op notify_op)
 {
-       struct page **reply_pages;
-       size_t reply_len;
-
-       __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
-       ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+       __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
 }
 
 static void rbd_notify_acquired_lock(struct work_struct *work)
@@ -4527,6 +4523,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
        cancel_work_sync(&rbd_dev->unlock_work);
 }
 
+/*
+ * header_rwsem must not be held to avoid a deadlock with
+ * rbd_dev_refresh() when flushing notifies.
+ */
 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
 {
        cancel_tasks_sync(rbd_dev);
@@ -6894,9 +6894,10 @@ static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
 
 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 {
-       rbd_dev_unprobe(rbd_dev);
-       if (rbd_dev->opts)
+       if (!rbd_is_ro(rbd_dev))
                rbd_unregister_watch(rbd_dev);
+
+       rbd_dev_unprobe(rbd_dev);
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;
@@ -6907,6 +6908,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
  * device.  If this image is the one being mapped (i.e., not a
  * parent), initiate a watch on its header object before using that
  * object to get detailed information about the rbd image.
+ *
+ * On success, returns with header_rwsem held for write if called
+ * with @depth == 0.
  */
 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 {
@@ -6936,11 +6940,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
                }
        }
 
+       if (!depth)
+               down_write(&rbd_dev->header_rwsem);
+
        ret = rbd_dev_header_info(rbd_dev);
        if (ret) {
                if (ret == -ENOENT && !need_watch)
                        rbd_print_dne(rbd_dev, false);
-               goto err_out_watch;
+               goto err_out_probe;
        }
 
        /*
@@ -6985,10 +6992,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
        return 0;
 
 err_out_probe:
-       rbd_dev_unprobe(rbd_dev);
-err_out_watch:
+       if (!depth)
+               up_write(&rbd_dev->header_rwsem);
        if (need_watch)
                rbd_unregister_watch(rbd_dev);
+       rbd_dev_unprobe(rbd_dev);
 err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
@@ -7050,12 +7058,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
                goto err_out_rbd_dev;
        }
 
-       down_write(&rbd_dev->header_rwsem);
        rc = rbd_dev_image_probe(rbd_dev, 0);
-       if (rc < 0) {
-               up_write(&rbd_dev->header_rwsem);
+       if (rc < 0)
                goto err_out_rbd_dev;
-       }
 
        if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
                rbd_warn(rbd_dev, "alloc_size adjusted to %u",
index 536b59aabd2cba18dab1656e66590628b08b1f6e..bacebd457e6f34697e546b7b7ea12a1cc8960427 100644 (file)
@@ -276,7 +276,7 @@ static void __init asm9260_acc_init(struct device_node *np)
 
        /* TODO: Convert to DT parent scheme */
        ref_clk = of_clk_get_parent_name(np, 0);
-       hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
+       hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
                        ref_clk, NULL, NULL, 0, rate, 0,
                        CLK_FIXED_RATE_PARENT_ACCURACY);
 
index 7077be293871193c94b780f73949f065d0002754..962014cfdc44e02e5314faa27896e81b9229593e 100644 (file)
@@ -97,7 +97,7 @@ static const struct clk_ops mmp_clk_pll_ops = {
        .recalc_rate = mmp_clk_pll_recalc_rate,
 };
 
-struct clk *mmp_clk_register_pll(char *name,
+static struct clk *mmp_clk_register_pll(char *name,
                        unsigned long default_rate,
                        void __iomem *enable_reg, u32 enable,
                        void __iomem *reg, u8 shift,
@@ -137,3 +137,34 @@ struct clk *mmp_clk_register_pll(char *name,
 
        return clk;
 }
+
+void mmp_register_pll_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_pll_clk *clks,
+                       void __iomem *base, int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               void __iomem *reg = NULL;
+
+               if (clks[i].offset)
+                       reg = base + clks[i].offset;
+
+               clk = mmp_clk_register_pll(clks[i].name,
+                                       clks[i].default_rate,
+                                       base + clks[i].enable_offset,
+                                       clks[i].enable,
+                                       reg, clks[i].shift,
+                                       clks[i].input_rate,
+                                       base + clks[i].postdiv_offset,
+                                       clks[i].postdiv_shift);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
index 317123641d1ed0be3ac73b41c9880d418c5119f5..ca7d37e2c7be6bb6aba2a7d4e1459d8c4369fcca 100644 (file)
@@ -176,37 +176,6 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit,
        }
 }
 
-void mmp_register_pll_clks(struct mmp_clk_unit *unit,
-                       struct mmp_param_pll_clk *clks,
-                       void __iomem *base, int size)
-{
-       struct clk *clk;
-       int i;
-
-       for (i = 0; i < size; i++) {
-               void __iomem *reg = NULL;
-
-               if (clks[i].offset)
-                       reg = base + clks[i].offset;
-
-               clk = mmp_clk_register_pll(clks[i].name,
-                                       clks[i].default_rate,
-                                       base + clks[i].enable_offset,
-                                       clks[i].enable,
-                                       reg, clks[i].shift,
-                                       clks[i].input_rate,
-                                       base + clks[i].postdiv_offset,
-                                       clks[i].postdiv_shift);
-               if (IS_ERR(clk)) {
-                       pr_err("%s: failed to register clock %s\n",
-                              __func__, clks[i].name);
-                       continue;
-               }
-               if (clks[i].id)
-                       unit->clk_table[clks[i].id] = clk;
-       }
-}
-
 void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
                        struct clk *clk)
 {
index 971b4d6d992fb774e4730e343aca7d34739c997a..20dc1e5dd7564367b8f0601feb30a98914984720 100644 (file)
@@ -238,13 +238,6 @@ void mmp_register_pll_clks(struct mmp_clk_unit *unit,
                        struct mmp_param_pll_clk *clks,
                        void __iomem *base, int size);
 
-extern struct clk *mmp_clk_register_pll(char *name,
-                       unsigned long default_rate,
-                       void __iomem *enable_reg, u32 enable,
-                       void __iomem *reg, u8 shift,
-                       unsigned long input_rate,
-                       void __iomem *postdiv_reg, u8 postdiv_shift);
-
 #define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc)    \
 {                                                      \
        .width_div = (w_d),                             \
index a0631f7756cfc6c79aaee2462458d6c82b9cf98f..2e2dfb2d48fff271662129eef32853c1ac6833c8 100644 (file)
@@ -1641,8 +1641,9 @@ static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb,  "i2c4-eb",      "ext-26m", 0x0,
                                0x1000, BIT(12), 0, 0);
 static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb,      "uart0-eb",     "ext-26m", 0x0,
                                0x1000, BIT(13), 0, 0);
+/* uart1_eb is for console, don't gate even if unused */
 static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb,      "uart1-eb",     "ext-26m", 0x0,
-                               0x1000, BIT(14), 0, 0);
+                               0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
 static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb,      "uart2-eb",     "ext-26m", 0x0,
                                0x1000, BIT(15), 0, 0);
 static SPRD_SC_GATE_CLK_FW_NAME(uart3_eb,      "uart3-eb",     "ext-26m", 0x0,
index 9c190026bfab1f9b34169206a6034a5b63384603..995e05f609ff0b549026cb33d465e210aa174f24 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_UDMABUF)           += udmabuf.o
 
 dmabuf_selftests-y := \
        selftest.o \
-       st-dma-fence.o
+       st-dma-fence.o \
+       st-dma-fence-chain.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
index ccc9eda1bc282851a6b4d2777297de271b03b029..570c923023e65dba5b83b78b7963f7315ca06cb0 100644 (file)
@@ -690,6 +690,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 
        attach->dev = dev;
        attach->dmabuf = dmabuf;
+       if (importer_ops)
+               attach->peer2peer = importer_ops->allow_peer2peer;
        attach->importer_ops = importer_ops;
        attach->importer_priv = importer_priv;
 
index 44a741677d2524c88d6a3cc056903a4bb31487bc..c435bbba851c774727b152c2eed875aab56e78d6 100644 (file)
@@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
                        replacement = NULL;
                }
 
-               tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+               tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
+                             prev, replacement);
                if (tmp == prev)
                        dma_fence_put(tmp);
                else
@@ -98,6 +99,12 @@ int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
                return -EINVAL;
 
        dma_fence_chain_for_each(*pfence, &chain->base) {
+               if ((*pfence)->seqno < seqno) { /* already signaled */
+                       dma_fence_put(*pfence);
+                       *pfence = NULL;
+                       break;
+               }
+
                if ((*pfence)->context != chain->base.context ||
                    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
                        break;
@@ -221,6 +228,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
  * @chain: the chain node to initialize
  * @prev: the previous fence
  * @fence: the current fence
+ * @seqno: the sequence number (syncpt) of the fence within the chain
  *
  * Initialize a new chain node and either start a new chain or add the node to
  * the existing chain of the previous fence.
index 052a41e2451c182068164816d35c14580c789386..90edf2b281b049381ebfc97d467b15e207a16b83 100644 (file)
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(dma_fence_get_stub);
 u64 dma_fence_context_alloc(unsigned num)
 {
        WARN_ON(!num);
-       return atomic64_add_return(num, &dma_fence_context_counter) - num;
+       return atomic64_fetch_add(num, &dma_fence_context_counter);
 }
 EXPORT_SYMBOL(dma_fence_context_alloc);
 
index 5320386f02e5f3f721f8e2e551475bcd6078173c..55918ef9adab25a33d3ca7f56186ac27729af317 100644 (file)
@@ -11,3 +11,4 @@
  */
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644 (file)
index 0000000..5d45ba7
--- /dev/null
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static struct kmem_cache *slab_fences;
+
+/*
+ * Minimal fence used by these selftests: a bare dma_fence plus the spinlock
+ * that dma_fence_init() requires.  to_mock_fence() downcasts from the
+ * embedded base fence back to the wrapper.
+ */
+static inline struct mock_fence {
+       struct dma_fence base;
+       spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+       return container_of(f, struct mock_fence, base);
+}
+
+/* Fixed tag returned for both the driver-name and timeline-name callbacks. */
+static const char *mock_name(struct dma_fence *f)
+{
+       return "mock";
+}
+
+/* Release callback: return the fence's memory to the selftest slab cache. */
+static void mock_fence_release(struct dma_fence *f)
+{
+       kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+/* Fence ops for mock fences: names and slab release only, no HW hooks. */
+static const struct dma_fence_ops mock_ops = {
+       .get_driver_name = mock_name,
+       .get_timeline_name = mock_name,
+       .release = mock_fence_release,
+};
+
+/*
+ * Allocate and initialise one unsignaled mock fence (context 0, seqno 0)
+ * from the selftest slab.  Returns the embedded dma_fence with one
+ * reference held, or NULL on allocation failure.
+ */
+static struct dma_fence *mock_fence(void)
+{
+       struct mock_fence *f;
+
+       f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       spin_lock_init(&f->lock);
+       dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+       return &f->base;
+}
+
+/*
+ * Thin wrapper around dma_fence_chain so chain nodes can be kmalloc'ed by
+ * the tests; to_mock_chain() downcasts from the chain's base fence.
+ */
+static inline struct mock_chain {
+       struct dma_fence_chain base;
+} *to_mock_chain(struct dma_fence *f) {
+       return container_of(f, struct mock_chain, base.base);
+}
+
+/*
+ * Build one chain node linking @fence after @prev with sequence point
+ * @seqno.  Extra references are taken on @prev and @fence here;
+ * presumably dma_fence_chain_init() takes ownership of both — confirm
+ * against the dma-fence-chain API.  Returns the node's base fence, or
+ * NULL on allocation failure.
+ */
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+                                   struct dma_fence *fence,
+                                   u64 seqno)
+{
+       struct mock_chain *f;
+
+       f = kmalloc(sizeof(*f), GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       dma_fence_chain_init(&f->base,
+                            dma_fence_get(prev),
+                            dma_fence_get(fence),
+                            seqno);
+
+       return &f->base.base;
+}
+
+/*
+ * Smoke test: create a single fence, wrap it in a one-node chain, signal
+ * and drop everything.  Only checks that construction succeeds; the puts
+ * below are safe even when chain is NULL (dma_fence_put(NULL) is a no-op).
+ */
+static int sanitycheck(void *arg)
+{
+       struct dma_fence *f, *chain;
+       int err = 0;
+
+       f = mock_fence();
+       if (!f)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, f, 1);
+       if (!chain)
+               err = -ENOMEM;
+
+       dma_fence_signal(f);
+       dma_fence_put(f);
+
+       dma_fence_put(chain);
+
+       return err;
+}
+
+/*
+ * Test fixture: parallel arrays of chain_length mock fences and the chain
+ * nodes wrapping them, plus @tail, the most recently added (newest) chain
+ * node from which lookups and waits start.
+ */
+struct fence_chains {
+       unsigned int chain_length;
+       struct dma_fence **fences;
+       struct dma_fence **chains;
+
+       struct dma_fence *tail;
+};
+
+/* Dense 1-based seqno sequence: index i maps to seqno i + 1 (1, 2, 3, ...). */
+static uint64_t seqno_inc(unsigned int i)
+{
+       return i + 1;
+}
+
+/*
+ * Populate @fc with @count mock fences chained together, assigning each
+ * link the sequence point @seqno_fn(i).  Returns 0 on success or -ENOMEM,
+ * in which case everything already built has been released.
+ *
+ * The arrays are __GFP_ZERO so the unwind path may blindly put all @count
+ * slots: the not-yet-filled entries are NULL and dma_fence_put(NULL) is a
+ * no-op.
+ */
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+                            uint64_t (*seqno_fn)(unsigned int))
+{
+       unsigned int i;
+       int err = 0;
+
+       fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!fc->chains)
+               return -ENOMEM;
+
+       fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!fc->fences) {
+               err = -ENOMEM;
+               goto err_chains;
+       }
+
+       fc->tail = NULL;
+       for (i = 0; i < count; i++) {
+               fc->fences[i] = mock_fence();
+               if (!fc->fences[i]) {
+                       err = -ENOMEM;
+                       goto unwind;
+               }
+
+               /* Each new node links after the previous tail. */
+               fc->chains[i] = mock_chain(fc->tail,
+                                          fc->fences[i],
+                                          seqno_fn(i));
+               if (!fc->chains[i]) {
+                       err = -ENOMEM;
+                       goto unwind;
+               }
+
+               fc->tail = fc->chains[i];
+       }
+
+       fc->chain_length = i;
+       return 0;
+
+unwind:
+       for (i = 0; i < count; i++) {
+               dma_fence_put(fc->fences[i]);
+               dma_fence_put(fc->chains[i]);
+       }
+       kvfree(fc->fences);
+err_chains:
+       kvfree(fc->chains);
+       return err;
+}
+
+/*
+ * Tear down a fixture built by fence_chains_init(): signal every fence
+ * (so chain nodes waiting on them can complete and be released), then
+ * drop all references and free both arrays.
+ */
+static void fence_chains_fini(struct fence_chains *fc)
+{
+       unsigned int i;
+
+       for (i = 0; i < fc->chain_length; i++) {
+               dma_fence_signal(fc->fences[i]);
+               dma_fence_put(fc->fences[i]);
+       }
+       kvfree(fc->fences);
+
+       for (i = 0; i < fc->chain_length; i++)
+               dma_fence_put(fc->chains[i]);
+       kvfree(fc->chains);
+}
+
+/*
+ * Exercise dma_fence_chain_find_seqno() on a dense 64-link chain:
+ * seqno 0 must succeed as a no-op, each seqno must resolve to its own
+ * chain node, looking up a node's own seqno from itself must return the
+ * same node, a future seqno must be rejected, and a past seqno must walk
+ * back to the earlier node.
+ *
+ * Note: @fence is compared against fc.chains[] after dma_fence_put();
+ * that is safe here because the fixture's chains[] array still holds its
+ * own reference until fence_chains_fini().
+ */
+static int find_seqno(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       /* seqno 0: nothing to find, must not error. */
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 0);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno(0)!\n", err);
+               goto err;
+       }
+
+       for (i = 0; i < fc.chain_length; i++) {
+               /* Lookup from the tail must land on chain node i. */
+               fence = dma_fence_get(fc.tail);
+               err = dma_fence_chain_find_seqno(&fence, i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Reported %d for find_seqno(%d:%d)!\n",
+                              err, fc.chain_length + 1, i + 1);
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+                              fc.chain_length + 1, i + 1);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* Looking up our own seqno from ourselves is idempotent. */
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for finding self\n");
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find self\n");
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* A seqno newer than this node must be rejected. */
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i + 2);
+               dma_fence_put(fence);
+               if (!err) {
+                       pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+                              i + 1, i + 2);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* An older seqno walks back to the previous node. */
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for previous fence!\n");
+                       goto err;
+               }
+               if (i > 0 && fence != fc.chains[i - 1]) {
+                       pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+                              i + 1, i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * With a two-link chain whose first fence is already signaled, a lookup
+ * for seqno 1 may return either NULL (already completed) or the first
+ * chain node itself; anything else is a failure.  On mismatch a find-self
+ * is retried purely for extra diagnostics before reporting -EINVAL.
+ */
+static int find_signaled(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+
+       err = fence_chains_init(&fc, 2, seqno_inc);
+       if (err)
+               return err;
+
+       dma_fence_signal(fc.fences[0]);
+
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 1);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno()!\n", err);
+               goto err;
+       }
+
+       if (fence && fence != fc.chains[0]) {
+               pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+                      fence->seqno);
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 1);
+               dma_fence_put(fence);
+               if (err)
+                       pr_err("Reported %d for finding self!\n", err);
+
+               err = -EINVAL;
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Signal a fence in the middle of a three-link chain (seqno 2 signaled
+ * out of order, before seqno 1).  A lookup for seqno 2 may then return
+ * NULL (treated as completed) or its own chain node; any other node is
+ * a failure, reported as -EINVAL after a diagnostic find-self retry.
+ */
+static int find_out_of_order(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+
+       err = fence_chains_init(&fc, 3, seqno_inc);
+       if (err)
+               return err;
+
+       dma_fence_signal(fc.fences[1]);
+
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 2);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno()!\n", err);
+               goto err;
+       }
+
+       if (fence && fence != fc.chains[1]) {
+               pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+                      fence->seqno);
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 2);
+               dma_fence_put(fence);
+               if (err)
+                       pr_err("Reported %d for finding self!\n", err);
+
+               err = -EINVAL;
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/* Sparse even seqno sequence: index i maps to 2i + 2 (2, 4, 6, ...). */
+static uint64_t seqno_inc2(unsigned int i)
+{
+       return 2 * i + 2;
+}
+
+/*
+ * On a chain with only even seqnos (2, 4, 6, ...), a lookup for the
+ * missing odd value 2i + 1 must round up to the node carrying 2i + 2,
+ * and a find-self on that node's own seqno must return the same node.
+ */
+static int find_gap(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc2);
+       if (err)
+               return err;
+
+       for (i = 0; i < fc.chain_length; i++) {
+               /* Odd seqno falls in the gap below node i's 2i + 2. */
+               fence = dma_fence_get(fc.tail);
+               err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Reported %d for find_seqno(%d:%d)!\n",
+                              err, fc.chain_length + 1, 2 * i + 1);
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+                              fence->seqno,
+                              fc.chain_length + 1,
+                              2 * i + 1);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* Exact-match lookup from the node itself. */
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for finding self\n");
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find self\n");
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Shared state for the multi-threaded race test: the fence fixture plus
+ * a count of live worker threads, used to wake the parent when all exit.
+ */
+struct find_race {
+       struct fence_chains fc;
+       atomic_t children;
+};
+
+/*
+ * Worker body for find_race: repeatedly look up a random seqno in
+ * [1, chain_length] while concurrently signaling random fences, checking
+ * that lookups never error, that find-self is stable, and that the node
+ * returned never carries a seqno older than the one requested.
+ */
+static int __find_race(void *arg)
+{
+       struct find_race *data = arg;
+       int err = 0;
+
+       while (!kthread_should_stop()) {
+               struct dma_fence *fence = dma_fence_get(data->fc.tail);
+               int seqno;
+
+               seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+               err = dma_fence_chain_find_seqno(&fence, seqno);
+               if (err) {
+                       pr_err("Failed to find fence seqno:%d\n",
+                              seqno);
+                       dma_fence_put(fence);
+                       break;
+               }
+               if (!fence)
+                       goto signal;
+
+               err = dma_fence_chain_find_seqno(&fence, seqno);
+               if (err) {
+                       pr_err("Reported an invalid fence for find-self:%d\n",
+                              seqno);
+                       dma_fence_put(fence);
+                       break;
+               }
+
+               if (fence->seqno < seqno) {
+                       pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n",
+                              fence->seqno, seqno);
+                       err = -EINVAL;
+                       dma_fence_put(fence);
+                       break;
+               }
+
+               dma_fence_put(fence);
+
+signal:
+               /*
+                * Signal index range is [0, chain_length - 2]; presumably
+                * the last fence is kept unsignaled deliberately so the
+                * tail never completes mid-test — TODO confirm.
+                */
+               seqno = prandom_u32_max(data->fc.chain_length - 1);
+               dma_fence_signal(data->fc.fences[seqno]);
+               cond_resched();
+       }
+
+       if (atomic_dec_and_test(&data->children))
+               wake_up_var(&data->children);
+       return err;
+}
+
+/*
+ * Stress dma_fence_chain_find_seqno() under concurrency: spawn one
+ * __find_race worker per online CPU on a CHAIN_SZ chain, let them run
+ * for up to 5 seconds (or until they all stop on error), then collect
+ * their exit codes and report how many fences ended up signaled.
+ * Returns the first worker/setup error, or 0.
+ */
+static int find_race(void *arg)
+{
+       struct find_race data;
+       int ncpus = num_online_cpus();
+       struct task_struct **threads;
+       unsigned long count;
+       int err;
+       int i;
+
+       err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+       if (!threads) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       atomic_set(&data.children, 0);
+       for (i = 0; i < ncpus; i++) {
+               threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+               if (IS_ERR(threads[i])) {
+                       /* Run with however many threads we managed to start. */
+                       ncpus = i;
+                       break;
+               }
+               atomic_inc(&data.children);
+               get_task_struct(threads[i]);
+       }
+
+       wait_var_event_timeout(&data.children,
+                              !atomic_read(&data.children),
+                              5 * HZ);
+
+       for (i = 0; i < ncpus; i++) {
+               int ret;
+
+               ret = kthread_stop(threads[i]);
+               if (ret && !err)
+                       err = ret;
+               put_task_struct(threads[i]);
+       }
+       kfree(threads);
+
+       count = 0;
+       for (i = 0; i < data.fc.chain_length; i++)
+               if (dma_fence_is_signaled(data.fc.fences[i]))
+                       count++;
+       pr_info("Completed %lu cycles\n", count);
+
+err:
+       fence_chains_fini(&data.fc);
+       return err;
+}
+
+/*
+ * Signal fences in chain order and check propagation: after signaling
+ * fence i, chain node i must report signaled while node i + 1 must not.
+ */
+static int signal_forward(void *arg)
+{
+       struct fence_chains fc;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       for (i = 0; i < fc.chain_length; i++) {
+               dma_fence_signal(fc.fences[i]);
+
+               if (!dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] not signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               if (i + 1 < fc.chain_length &&
+                   dma_fence_is_signaled(fc.chains[i + 1])) {
+                       pr_err("chain[%d] is signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Signal fences in reverse chain order: no node after the first may
+ * report signaled until its whole prefix is signaled, but once every
+ * fence has been signaled all chain nodes must report signaled.
+ */
+static int signal_backward(void *arg)
+{
+       struct fence_chains fc;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       for (i = fc.chain_length; i--; ) {
+               dma_fence_signal(fc.fences[i]);
+
+               /* i == 0 completes the prefix, so only check i > 0. */
+               if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] is signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       for (i = 0; i < fc.chain_length; i++) {
+               if (!dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] was not signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Waiter thread body: block (uninterruptibly) until the fixture's tail
+ * chain fence signals.  Returns 0 on success, -EIO if the wait fails.
+ */
+static int __wait_fence_chains(void *arg)
+{
+       struct fence_chains *fc = arg;
+
+       if (dma_fence_wait(fc->tail, false))
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * Start a thread waiting on the tail of a CHAIN_SZ chain, then signal
+ * all fences in forward order; the waiter must complete without error.
+ * yield_to() gives the waiter a chance to actually block first.
+ */
+static int wait_forward(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       yield_to(tsk, true);
+
+       for (i = 0; i < fc.chain_length; i++)
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Same as wait_forward but signals the fences in reverse order, so the
+ * waiter on the tail may only complete once the first link signals.
+ */
+static int wait_backward(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       yield_to(tsk, true);
+
+       for (i = fc.chain_length; i--; )
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Shuffle the fences[] array in place so subsequent signaling happens in
+ * random order relative to chain position.  Only the fence array moves;
+ * the chain itself is left intact.
+ */
+static void randomise_fences(struct fence_chains *fc)
+{
+       unsigned int count = fc->chain_length;
+
+       /* Fisher-Yates shuffle courtesy of Knuth */
+       while (--count) {
+               unsigned int swp;
+
+               swp = prandom_u32_max(count + 1);
+               if (swp == count)
+                       continue;
+
+               swap(fc->fences[count], fc->fences[swp]);
+       }
+}
+
+/*
+ * Same waiter-on-tail scheme as wait_forward, but the fences are shuffled
+ * first so signaling order is random with respect to chain position.
+ */
+static int wait_random(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       randomise_fences(&fc);
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       yield_to(tsk, true);
+
+       for (i = 0; i < fc.chain_length; i++)
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Entry point for the dma-fence-chain selftests (registered via
+ * selftest() in selftests.h).  Creates the mock-fence slab, runs every
+ * subtest in order, then destroys the slab.  Returns the subtest result
+ * or -ENOMEM if the slab cannot be created.
+ */
+int dma_fence_chain(void)
+{
+       static const struct subtest tests[] = {
+               SUBTEST(sanitycheck),
+               SUBTEST(find_seqno),
+               SUBTEST(find_signaled),
+               SUBTEST(find_out_of_order),
+               SUBTEST(find_gap),
+               SUBTEST(find_race),
+               SUBTEST(signal_forward),
+               SUBTEST(signal_backward),
+               SUBTEST(wait_forward),
+               SUBTEST(wait_backward),
+               SUBTEST(wait_random),
+       };
+       int ret;
+
+       pr_info("sizeof(dma_fence_chain)=%zu\n",
+               sizeof(struct dma_fence_chain));
+
+       slab_fences = KMEM_CACHE(mock_fence,
+                                SLAB_TYPESAFE_BY_RCU |
+                                SLAB_HWCACHE_ALIGN);
+       if (!slab_fences)
+               return -ENOMEM;
+
+       ret = subtests(tests, NULL);
+
+       kmem_cache_destroy(slab_fences);
+       return ret;
+}
index b1af0de2e10080ef8080183d0797436217520b28..9d2512913d2587361b576893897969d73e24d029 100644 (file)
@@ -101,7 +101,7 @@ void cper_print_bits(const char *pfx, unsigned int bits,
                if (!len)
                        len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
                else
-                       len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+                       len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
        }
        if (len)
                printk("%s\n", buf);
index cc90a748bcf0208a6a7c6cfc5b7815f2487dfe7f..67d26949fd26df4a90ed2a6793d55940b46cef6c 100644 (file)
@@ -25,7 +25,7 @@
 #define EFI_ALLOC_ALIGN                EFI_PAGE_SIZE
 #endif
 
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_X86)
 #define __efistub_global       __section(.data)
 #else
 #define __efistub_global
index d4c7e5f59d2c8eaa63d34967b0c28c799aa39449..ea66b1f16a79d17b4b5bf1cce7a92042a296af69 100644 (file)
  */
 #define EFI_READ_CHUNK_SIZE    SZ_1M
 
+struct finfo {
+       efi_file_info_t info;
+       efi_char16_t    filename[MAX_FILENAME_SIZE];
+};
+
 static efi_status_t efi_open_file(efi_file_protocol_t *volume,
-                                 efi_char16_t *filename_16,
+                                 struct finfo *fi,
                                  efi_file_protocol_t **handle,
                                  unsigned long *file_size)
 {
-       struct {
-               efi_file_info_t info;
-               efi_char16_t    filename[MAX_FILENAME_SIZE];
-       } finfo;
        efi_guid_t info_guid = EFI_FILE_INFO_ID;
        efi_file_protocol_t *fh;
        unsigned long info_sz;
        efi_status_t status;
 
-       status = volume->open(volume, &fh, filename_16, EFI_FILE_MODE_READ, 0);
+       status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
        if (status != EFI_SUCCESS) {
                pr_efi_err("Failed to open file: ");
-               efi_char16_printk(filename_16);
+               efi_char16_printk(fi->filename);
                efi_printk("\n");
                return status;
        }
 
-       info_sz = sizeof(finfo);
-       status = fh->get_info(fh, &info_guid, &info_sz, &finfo);
+       info_sz = sizeof(struct finfo);
+       status = fh->get_info(fh, &info_guid, &info_sz, fi);
        if (status != EFI_SUCCESS) {
                pr_efi_err("Failed to get file info\n");
                fh->close(fh);
@@ -60,7 +61,7 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
        }
 
        *handle = fh;
-       *file_size = finfo.info.file_size;
+       *file_size = fi->info.file_size;
        return EFI_SUCCESS;
 }
 
@@ -146,13 +147,13 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
 
        alloc_addr = alloc_size = 0;
        do {
-               efi_char16_t filename[MAX_FILENAME_SIZE];
+               struct finfo fi;
                unsigned long size;
                void *addr;
 
                offset = find_file_option(cmdline, cmdline_len,
                                          optstr, optstr_size,
-                                         filename, ARRAY_SIZE(filename));
+                                         fi.filename, ARRAY_SIZE(fi.filename));
 
                if (!offset)
                        break;
@@ -166,7 +167,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
                                return status;
                }
 
-               status = efi_open_file(volume, filename, &file, &size);
+               status = efi_open_file(volume, &fi, &file, &size);
                if (status != EFI_SUCCESS)
                        goto err_close_volume;
 
index 8d3a707789de83fd3c713a081505dbd8a2b33346..05ccb229fb45ffbb24df88bbb1f90ffb5ec73f77 100644 (file)
@@ -20,7 +20,7 @@
 /* Maximum physical address for 64-bit kernel with 4-level paging */
 #define MAXMEM_X86_64_4LEVEL (1ull << 46)
 
-static efi_system_table_t *sys_table;
+static efi_system_table_t *sys_table __efistub_global;
 extern const bool efi_is64;
 extern u32 image_offset;
 
@@ -392,8 +392,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
        image_base = efi_table_attr(image, image_base);
        image_offset = (void *)startup_32 - image_base;
 
-       hdr = &((struct boot_params *)image_base)->hdr;
-
        status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX);
        if (status != EFI_SUCCESS) {
                efi_printk("Failed to allocate lowmem for boot params\n");
@@ -742,8 +740,15 @@ unsigned long efi_main(efi_handle_t handle,
         * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
         * KASLR uses.
         *
-        * Also relocate it if image_offset is zero, i.e. we weren't loaded by
-        * LoadImage, but we are not aligned correctly.
+        * Also relocate it if image_offset is zero, i.e. the kernel wasn't
+        * loaded by LoadImage, but rather by a bootloader that called the
+        * handover entry. The reason we must always relocate in this case is
+        * to handle the case of systemd-boot booting a unified kernel image,
+        * which is a PE executable that contains the bzImage and an initrd as
+        * COFF sections. The initrd section is placed after the bzImage
+        * without ensuring that there are at least init_size bytes available
+        * for the bzImage, and thus the compressed kernel's startup code may
+        * overwrite the initrd unless it is moved out of the way.
         */
 
        buffer_start = ALIGN(bzimage_addr - image_offset,
@@ -753,8 +758,7 @@ unsigned long efi_main(efi_handle_t handle,
        if ((buffer_start < LOAD_PHYSICAL_ADDR)                              ||
            (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE)    ||
            (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
-           (image_offset == 0 && !IS_ALIGNED(bzimage_addr,
-                                             hdr->kernel_alignment))) {
+           (image_offset == 0)) {
                status = efi_relocate_kernel(&bzimage_addr,
                                             hdr->init_size, hdr->init_size,
                                             hdr->pref_address,
index 43594978958e804807a3d681869ca4963100570c..4f4e7fa001c15b0b47272772096a8d7865b1b31e 100644 (file)
@@ -310,8 +310,6 @@ source "drivers/gpu/drm/ast/Kconfig"
 
 source "drivers/gpu/drm/mgag200/Kconfig"
 
-source "drivers/gpu/drm/cirrus/Kconfig"
-
 source "drivers/gpu/drm/armada/Kconfig"
 
 source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
index 7f72ef5e781113d6fed2438ab4d9f126bed1facf..2c0e5a7e595362a5176a148d6d8d2770fd891804 100644 (file)
@@ -17,7 +17,8 @@ drm-y       :=        drm_auth.o drm_cache.o \
                drm_plane.o drm_color_mgmt.o drm_print.o \
                drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
                drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
-               drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
+               drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+               drm_managed.o
 
 drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
-drm_vram_helper-y := drm_gem_vram_helper.o \
-                    drm_vram_helper_common.o
+drm_vram_helper-y := drm_gem_vram_helper.o
 obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
 
 drm_ttm_helper-y := drm_gem_ttm_helper.o
@@ -74,7 +74,6 @@ obj-$(CONFIG_DRM_I915)        += i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_V3D)  += v3d/
 obj-$(CONFIG_DRM_VC4)  += vc4/
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
index c2bbcdd9c875c25efac9bdc7cf63fd249d306c0e..210d57a4afc812b0c0b6d4b3d4a8d38f89afc159 100644 (file)
@@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
        amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
        amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
-       amdgpu_umc.o smu_v11_0_i2c.o
+       amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
index 2992a49ad4a57d0c34ef0c46addf54b681f3ad2e..7975f8e157df22c4bf62f155638182dea142e86f 100644 (file)
 #ifndef __AMDGPU_H__
 #define __AMDGPU_H__
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
 #include "amdgpu_ctx.h"
 
 #include <linux/atomic.h>
@@ -177,6 +189,8 @@ extern int sched_policy;
 static const int sched_policy = KFD_SCHED_POLICY_HWS;
 #endif
 
+extern int amdgpu_tmz;
+
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
 #endif
@@ -190,8 +204,6 @@ extern int amdgpu_cik_support;
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT           (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE                    16
 #define AMDGPU_DEBUGFS_MAX_COMPONENTS          32
 #define AMDGPUFB_CONN_LIMIT                    4
 #define AMDGPU_BIOS_NUM_SCRATCH                        16
@@ -439,7 +451,9 @@ struct amdgpu_fpriv {
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib);
+                 unsigned size,
+                 enum amdgpu_ib_pool_type pool,
+                 struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -512,7 +526,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 128      /* Reserve at most 128 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 256      /* Reserve at most 256 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
@@ -751,7 +765,6 @@ struct amdgpu_device {
        uint8_t                         *bios;
        uint32_t                        bios_size;
        struct amdgpu_bo                *stolen_vga_memory;
-       struct amdgpu_bo                *discovery_memory;
        uint32_t                        bios_scratch_reg_offset;
        uint32_t                        bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -843,7 +856,8 @@ struct amdgpu_device {
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
-       struct amdgpu_sa_manager        ring_tmp_bo;
+       struct amdgpu_sa_manager        ib_pools[AMDGPU_IB_POOL_MAX];
+       struct amdgpu_sched             gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
 
        /* interrupts */
        struct amdgpu_irq               irq;
@@ -903,7 +917,9 @@ struct amdgpu_device {
        struct amdgpu_display_manager dm;
 
        /* discovery */
-       uint8_t                         *discovery;
+       uint8_t                         *discovery_bin;
+       uint32_t                        discovery_tmr_size;
+       struct amdgpu_bo                *discovery_memory;
 
        /* mes */
        bool                            enable_mes;
@@ -923,7 +939,7 @@ struct amdgpu_device {
        atomic64_t gart_pin_size;
 
        /* soc15 register offset based on ip, instance and  segment */
-       uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+       uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
        /* delayed work_func for deferring clockgating during resume */
        struct delayed_work     delayed_init_work;
@@ -935,9 +951,6 @@ struct amdgpu_device {
        /* link all shadow bo */
        struct list_head                shadow_list;
        struct mutex                    shadow_list_lock;
-       /* keep an lru list of rings by HW IP */
-       struct list_head                ring_lru_list;
-       spinlock_t                      ring_lru_list_lock;
 
        /* record hw reset is performed */
        bool has_hw_reset;
@@ -945,9 +958,8 @@ struct amdgpu_device {
 
        /* s3/s4 mask */
        bool                            in_suspend;
+       bool                            in_hibernate;
 
-       /* record last mm index being written through WREG32*/
-       unsigned long last_mm_index;
        bool                            in_gpu_reset;
        enum pp_mp1_state               mp1_state;
        struct mutex  lock_reset;
@@ -966,14 +978,19 @@ struct amdgpu_device {
        uint64_t                        unique_id;
        uint64_t        df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
 
-       /* device pstate */
-       int                             pstate;
        /* enable runtime pm on the device */
        bool                            runpm;
        bool                            in_runpm;
 
        bool                            pm_sysfs_en;
        bool                            ucode_sysfs_en;
+
+       /* Chip product information */
+       char                            product_number[16];
+       char                            product_name[32];
+       char                            serial[16];
+
+       struct amdgpu_autodump          autodump;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -990,10 +1007,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+                           uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                        uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1010,25 +1027,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 /*
  * Registers read & write functions.
  */
-
-#define AMDGPU_REGS_IDX       (1<<0)
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
-#define AMDGPU_REGS_KIQ       (1<<2)
 
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
-#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
-#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
 
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1065,7 +1077,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
                tmp_ |= ((val) & ~(mask));                      \
                WREG32_PLL(reg, tmp_);                          \
        } while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
 
@@ -1248,5 +1260,9 @@ _name##_show(struct device *dev,                                  \
                                                                        \
 static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
 
-#endif
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+       return adev->gmc.tmz_enabled;
+}
 
+#endif
index 1e41367ef74ee12c4181cc120e94cd1f6f3c5706..956cbbda479356fb00f263e6d45d044b1de96761 100644 (file)
@@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 
                DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-               /* todo: add DC handling */
                if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
                    !amdgpu_device_has_dc_support(adev)) {
                        struct amdgpu_encoder *enc = atif->encoder_for_bl;
@@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 #endif
                        }
                }
+#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+               if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+                   amdgpu_device_has_dc_support(adev)) {
+                       struct amdgpu_display_manager *dm = &adev->dm;
+                       struct backlight_device *bd = dm->backlight_dev;
+
+                       if (bd) {
+                               DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+                                                req.backlight_level);
+
+                               /*
+                                * XXX backlight_device_set_brightness() is
+                                * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+                                * It probably should accept 'reason' parameter.
+                                */
+                               backlight_device_set_brightness(bd, req.backlight_level);
+                       }
+               }
+#endif
+#endif
                if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
                        if (adev->flags & AMD_IS_PX) {
                                pm_runtime_get_sync(adev->ddev->dev);
index abfbe89e805ef2f4ef89585b12f41a9066273fd3..ad59ac4423b8bddb829d8a62d6101d85bdb0fe79 100644 (file)
@@ -564,6 +564,13 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
        return adev->gds.gws_size;
 }
 
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       return adev->rev_id;
+}
+
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
index 13feb313e9b3922e53f764d02a1935aba7208c00..3f2b695cf19e2a2b8e55b29205e666f79394854b 100644 (file)
@@ -65,6 +65,7 @@ struct kgd_mem {
        struct amdgpu_sync sync;
 
        bool aql_queue;
+       bool is_imported;
 };
 
 /* KFD Memory Eviction */
@@ -148,6 +149,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+                                       int queue_bit);
+
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                                void **mem_obj, uint64_t *gpu_addr,
@@ -175,6 +179,7 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
 
 /* Read user wptr from a specified user address space with page fault
@@ -218,7 +223,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags);
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem);
+               struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
index 4ec6d0c0320107489a4b7072d68f056b48da915a..691c89705bcdc0aa1ab2fecbb0c8fad6acc28191 100644 (file)
@@ -543,6 +543,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        uint32_t temp;
        struct v10_compute_mqd *m = get_mqd(mqd);
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
 #if 0
        unsigned long flags;
        int retry;
index 9dff792c929036701e78af5f19f24463e9f6448b..da8b31a53291bbe1cd63cf172d93bd5bb8ebefde 100644 (file)
@@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
        if (ret) {
-               pr_err("amdgpu: failed to validate PT BOs\n");
+               pr_err("failed to validate PT BOs\n");
                return ret;
        }
 
        ret = amdgpu_amdkfd_validate(&param, pd);
        if (ret) {
-               pr_err("amdgpu: failed to validate PD\n");
+               pr_err("failed to validate PD\n");
                return ret;
        }
 
@@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
                if (ret) {
-                       pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+                       pr_err("failed to kmap PD, ret=%d\n", ret);
                        return ret;
                }
        }
@@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
-       if (!ret)
-               ctx->reserved = true;
-       else {
-               pr_err("Failed to reserve buffers in ttm\n");
+       if (ret) {
+               pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
+               return ret;
        }
 
-       return ret;
+       ctx->reserved = true;
+       return 0;
 }
 
 /**
@@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
-       if (!ret)
-               ctx->reserved = true;
-       else
-               pr_err("Failed to reserve buffers in ttm.\n");
-
        if (ret) {
+               pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
+               return ret;
        }
 
-       return ret;
+       ctx->reserved = true;
+       return 0;
 }
 
 /**
@@ -1279,29 +1277,31 @@ err:
 }
 
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem)
+               struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
+       unsigned int mapped_to_gpu_memory;
        int ret;
+       bool is_imported = 0;
 
        mutex_lock(&mem->lock);
+       mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+       is_imported = mem->is_imported;
+       mutex_unlock(&mem->lock);
+       /* lock is not needed after this, since mem is unused and will
+        * be freed anyway
+        */
 
-       if (mem->mapped_to_gpu_memory > 0) {
+       if (mapped_to_gpu_memory > 0) {
                pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
                                mem->va, bo_size);
-               mutex_unlock(&mem->lock);
                return -EBUSY;
        }
 
-       mutex_unlock(&mem->lock);
-       /* lock is not needed after this, since mem is unused and will
-        * be freed anyway
-        */
-
        /* No more MMU notifiers */
        amdgpu_mn_unregister(mem->bo);
 
@@ -1342,8 +1342,19 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                kfree(mem->bo->tbo.sg);
        }
 
+       /* Update the size of the BO being freed if it was allocated from
+        * VRAM and is not imported.
+        */
+       if (size) {
+               if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
+                   (!is_imported))
+                       *size = bo_size;
+               else
+                       *size = 0;
+       }
+
        /* Free the BO*/
-       amdgpu_bo_unref(&mem->bo);
+       drm_gem_object_put_unlocked(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
        kfree(mem);
 
@@ -1688,7 +1699,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-       (*mem)->bo = amdgpu_bo_ref(bo);
+       drm_gem_object_get(&bo->tbo.base);
+       (*mem)->bo = bo;
        (*mem)->va = va;
        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@@ -1696,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
        amdgpu_sync_create(&(*mem)->sync);
+       (*mem)->is_imported = true;
 
        return 0;
 }
index d1495e1c92894168bc84e3f89decd27f31516cf2..d9b35df33806d178afbea6d44954396121bee021 100644 (file)
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
        for (i = 0; i < n; i++) {
                struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
                r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
-                                      false, false);
+                                      false, false, false);
                if (r)
                        goto exit_do_move;
                r = dma_fence_wait(fence, false);
index 031b094607bdd5bb4682fa04ad477c56ad238ac0..78ac6dbe70d84e22c9e2497354df636416931752 100644 (file)
@@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 {
        CGS_FUNC_ADEV;
        switch (space) {
-       case CGS_IND_REG__MMIO:
-               return RREG32_IDX(index);
        case CGS_IND_REG__PCIE:
                return RREG32_PCIE(index);
        case CGS_IND_REG__SMC:
@@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
+       default:
+               BUG();
        }
        WARN(1, "Invalid indirect register space");
        return 0;
@@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 {
        CGS_FUNC_ADEV;
        switch (space) {
-       case CGS_IND_REG__MMIO:
-               return WREG32_IDX(index, value);
        case CGS_IND_REG__PCIE:
                return WREG32_PCIE(index, value);
        case CGS_IND_REG__SMC:
@@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
+       default:
+               BUG();
        }
        WARN(1, "Invalid indirect register space");
 }
index af91627b19b0c5dd42a924fc2169136fd5165934..19070226a94521d112df8798bd5b19218d41364d 100644 (file)
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                ring = to_amdgpu_ring(entity->rq->sched);
                r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-                                  chunk_ib->ib_bytes : 0, ib);
+                                  chunk_ib->ib_bytes : 0,
+                                  AMDGPU_IB_POOL_DELAYED, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
@@ -1207,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_sched_entity *entity = p->entity;
-       enum drm_sched_priority priority;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
@@ -1257,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
-       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
index 6ed36a2c5f73f2e89654d34b3cdbd9444c0c0270..8842c55d4490b3fdb57ffda94a7cfee219bfc14d 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 #include "amdgpu_ras.h"
+#include <linux/nospec.h>
 
 #define to_amdgpu_ctx_entity(e)        \
        container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
        }
 }
 
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+                                                enum drm_sched_priority prio,
+                                                u32 hw_ip)
+{
+       unsigned int hw_prio;
+
+       hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+                       amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+                       AMDGPU_RING_PRIO_DEFAULT;
+       hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+       if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+               hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+       return hw_prio;
+}
+
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+                                  const u32 ring)
 {
        struct amdgpu_device *adev = ctx->adev;
        struct amdgpu_ctx_entity *entity;
        struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
        unsigned num_scheds = 0;
-       enum gfx_pipe_priority hw_prio;
+       unsigned int hw_prio;
        enum drm_sched_priority priority;
        int r;
 
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
        entity->sequence = 1;
        priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                                ctx->init_priority : ctx->override_priority;
-       switch (hw_ip) {
-       case AMDGPU_HW_IP_GFX:
-               sched = &adev->gfx.gfx_ring[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_COMPUTE:
-               hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-               scheds = adev->gfx.compute_prio_sched[hw_prio];
-               num_scheds = adev->gfx.num_compute_sched[hw_prio];
-               break;
-       case AMDGPU_HW_IP_DMA:
-               scheds = adev->sdma.sdma_sched;
-               num_scheds = adev->sdma.num_sdma_sched;
-               break;
-       case AMDGPU_HW_IP_UVD:
-               sched = &adev->uvd.inst[0].ring.sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCE:
-               sched = &adev->vce.ring[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_UVD_ENC:
-               sched = &adev->uvd.inst[0].ring_enc[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_DEC:
-               sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
-                                           adev->vcn.num_vcn_dec_sched);
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_ENC:
-               sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
-                                           adev->vcn.num_vcn_enc_sched);
+       hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+       hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+       scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+       num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+       if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+               sched = drm_sched_pick_best(scheds, num_scheds);
                scheds = &sched;
                num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_JPEG:
-               scheds = adev->jpeg.jpeg_sched;
-               num_scheds =  adev->jpeg.num_jpeg_sched;
-               break;
        }
 
        r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
        return 0;
-
 }
 
 static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
                                            enum drm_sched_priority priority)
 {
        struct amdgpu_device *adev = ctx->adev;
-       enum gfx_pipe_priority hw_prio;
+       unsigned int hw_prio;
        struct drm_gpu_scheduler **scheds = NULL;
        unsigned num_scheds;
 
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 
        /* set hw priority */
        if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-               hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-               scheds = adev->gfx.compute_prio_sched[hw_prio];
-               num_scheds = adev->gfx.num_compute_sched[hw_prio];
+               hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+                                                     AMDGPU_HW_IP_COMPUTE);
+               hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+               scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+               num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
                drm_sched_entity_modify_sched(&aentity->entity, scheds,
                                              num_scheds);
        }
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
 }
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
-       int num_compute_sched_normal = 0;
-       int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
-       int i;
-
-       /* use one drm sched array, gfx.compute_sched to store both high and
-        * normal priority drm compute schedulers */
-       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               if (!adev->gfx.compute_ring[i].has_high_prio)
-                       adev->gfx.compute_sched[num_compute_sched_normal++] =
-                               &adev->gfx.compute_ring[i].sched;
-               else
-                       adev->gfx.compute_sched[num_compute_sched_high--] =
-                               &adev->gfx.compute_ring[i].sched;
-       }
-
-       /* compute ring only has two priority for now */
-       i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-       adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-       adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
-       i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-       if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
-               /* When compute has no high priority rings then use */
-               /* normal priority sched array */
-               adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-               adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-       } else {
-               adev->gfx.compute_prio_sched[i] =
-                       &adev->gfx.compute_sched[num_compute_sched_high - 1];
-               adev->gfx.num_compute_sched[i] =
-                       adev->gfx.num_compute_rings - num_compute_sched_normal;
-       }
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
-       int i, j;
-
-       amdgpu_ctx_init_compute_sched(adev);
-       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
-               adev->gfx.num_gfx_sched++;
-       }
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
-               adev->sdma.num_sdma_sched++;
-       }
-
-       for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-               if (adev->vcn.harvest_config & (1 << i))
-                       continue;
-               adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
-                       &adev->vcn.inst[i].ring_dec.sched;
-       }
-
-       for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-               if (adev->vcn.harvest_config & (1 << i))
-                       continue;
-               for (j = 0; j < adev->vcn.num_enc_rings; ++j)
-                       adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
-                               &adev->vcn.inst[i].ring_enc[j].sched;
-       }
-
-       for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-               if (adev->jpeg.harvest_config & (1 << i))
-                       continue;
-               adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
-                       &adev->jpeg.inst[i].ring_dec.sched;
-       }
-}
index de490f183af2bcf115182e25a24f8b9d8a34bf17..f54e1031466159cd1436836104e3dc4bdd026a07 100644 (file)
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
 #endif
index c0f9a651dc067bbf46ce689535934740ab7315ee..d33cb344be69f5266b4933137151ffa5feb5117a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
-
+#include <linux/poll.h>
 #include <drm/drm_debugfs.h>
 
 #include "amdgpu.h"
@@ -74,8 +74,82 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
        return 0;
 }
 
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned long timeout = 600 * HZ;
+       int ret;
+
+       wake_up_interruptible(&adev->autodump.gpu_hang);
+
+       ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
+       if (ret == 0) {
+               pr_err("autodump: timeout, move on to gpu recovery\n");
+               return -ETIMEDOUT;
+       }
+#endif
+       return 0;
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
+{
+       struct amdgpu_device *adev = inode->i_private;
+       int ret;
+
+       file->private_data = adev;
+
+       mutex_lock(&adev->lock_reset);
+       if (adev->autodump.dumping.done) {
+               reinit_completion(&adev->autodump.dumping);
+               ret = 0;
+       } else {
+               ret = -EBUSY;
+       }
+       mutex_unlock(&adev->lock_reset);
+
+       return ret;
+}
+
+static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
+{
+       struct amdgpu_device *adev = file->private_data;
+
+       complete_all(&adev->autodump.dumping);
+       return 0;
+}
+
+static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+       struct amdgpu_device *adev = file->private_data;
+
+       poll_wait(file, &adev->autodump.gpu_hang, poll_table);
+
+       if (adev->in_gpu_reset)
+               return POLLIN | POLLRDNORM | POLLWRNORM;
+
+       return 0;
+}
+
+static const struct file_operations autodump_debug_fops = {
+       .owner = THIS_MODULE,
+       .open = amdgpu_debugfs_autodump_open,
+       .poll = amdgpu_debugfs_autodump_poll,
+       .release = amdgpu_debugfs_autodump_release,
+};
+
+static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
+{
+       init_completion(&adev->autodump.dumping);
+       complete_all(&adev->autodump.dumping);
+       init_waitqueue_head(&adev->autodump.gpu_hang);
+
+       debugfs_create_file("amdgpu_autodump", 0600,
+               adev->ddev->primary->debugfs_root,
+               adev, &autodump_debug_fops);
+}
+
 /**
  * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
  *
@@ -152,11 +226,16 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
                    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return -EINVAL;
                }
                mutex_lock(&adev->grbm_idx_mutex);
@@ -207,6 +286,7 @@ end:
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -255,6 +335,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -263,6 +347,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -275,6 +360,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -304,6 +390,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -311,6 +401,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -325,6 +416,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -354,6 +446,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -362,6 +458,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -374,6 +471,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -403,6 +501,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -410,6 +512,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -424,6 +527,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -453,6 +557,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -461,6 +569,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -473,6 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -502,6 +612,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -509,6 +623,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -523,6 +638,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -651,16 +767,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
 
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
-       if (r)
+       if (r) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return r;
+       }
 
-       if (size > valuesize)
+       if (size > valuesize) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return -EINVAL;
+       }
 
        outsize = 0;
        x = 0;
@@ -673,6 +797,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
                }
        }
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return !r ? outsize : r;
 }
 
@@ -720,6 +845,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        /* switch to the specific se/sh/cu */
        mutex_lock(&adev->grbm_idx_mutex);
        amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -734,16 +863,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
-       if (!x)
+       if (!x) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return -EINVAL;
+       }
 
        while (size && (offset < x * 4)) {
                uint32_t value;
 
                value = data[offset >> 2];
                r = put_user(value, (uint32_t *)buf);
-               if (r)
+               if (r) {
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
+               }
 
                result += 4;
                buf += 4;
@@ -751,6 +884,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
                size -= 4;
        }
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -805,6 +939,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        /* switch to the specific se/sh/cu */
        mutex_lock(&adev->grbm_idx_mutex);
        amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -840,6 +978,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 
 err:
        kfree(data);
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -1369,6 +1508,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 
        amdgpu_ras_debugfs_create_all(adev);
 
+       amdgpu_debugfs_autodump_init(adev);
+
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
                                        ARRAY_SIZE(amdgpu_debugfs_list));
 }
index de12d11015260eb3c44a9ef1cb95a434a26b5fda..2803884d338d53c1218b969fdb61c13658885660 100644 (file)
@@ -31,6 +31,11 @@ struct amdgpu_debugfs {
        unsigned                num_files;
 };
 
+struct amdgpu_autodump {
+       struct completion               dumping;
+       struct wait_queue_head          gpu_hang;
+};
+
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_fini(struct amdgpu_device *adev);
@@ -40,3 +45,4 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
index 559dc24ef436719f1b25992070b360d2e911fc16..545beebcf43e8758488219350c08d9c950e505e1 100644 (file)
 #include "amdgpu_xgmi.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -137,6 +139,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
+/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_name(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, S_IRUGO,
+               amdgpu_device_get_product_name, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_number(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, S_IRUGO,
+               amdgpu_device_get_product_number, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_serial_number(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+               amdgpu_device_get_serial_number, NULL);
+
 /**
  * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
  *
@@ -231,10 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * MMIO register access helper functions.
+ * device register access helper functions.
  */
 /**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -242,25 +310,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
-                       uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+                           uint32_t acc_flags)
 {
        uint32_t ret;
 
-       if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_rreg(adev, reg);
 
-       if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+       if ((reg * 4) < adev->rmmio_size)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       else
+               ret = adev->pcie_rreg(adev, (reg * 4));
+       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
        return ret;
 }
 
@@ -306,28 +368,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
                BUG();
 }
 
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
+                                            uint32_t v, uint32_t acc_flags)
 {
-       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 
-       if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+       if ((reg * 4) < adev->rmmio_size)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-               udelay(500);
-       }
+       else
+               adev->pcie_wreg(adev, (reg * 4), v);
 }
 
 /**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -336,17 +389,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+                       uint32_t acc_flags)
 {
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-               adev->last_mm_index = v;
-       }
-
-       if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
 }
 
 /*
@@ -365,7 +414,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
 }
 
 /**
@@ -397,20 +446,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-               adev->last_mm_index = v;
-       }
-
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }
-
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-               udelay(500);
-       }
 }
 
 /**
@@ -1126,6 +1167,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
+       amdgpu_gmc_tmz_set(adev);
+
        return 0;
 }
 
@@ -1147,7 +1190,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                return;
 
        if (state == VGA_SWITCHEROO_ON) {
-               pr_info("amdgpu: switched on\n");
+               pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
@@ -1161,7 +1204,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
-               pr_info("amdgpu: switched off\n");
+               pr_info("switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
@@ -1731,9 +1774,28 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        amdgpu_amdkfd_device_probe(adev);
 
        if (amdgpu_sriov_vf(adev)) {
+               /* handle vbios stuff prior full access mode for new handshake */
+               if (adev->virt.req_init_data_ver == 1) {
+                       if (!amdgpu_get_bios(adev)) {
+                               DRM_ERROR("failed to get vbios\n");
+                               return -EINVAL;
+                       }
+
+                       r = amdgpu_atombios_init(adev);
+                       if (r) {
+                               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+                               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+                               return r;
+                       }
+               }
+       }
+
+       /* we need to send REQ_GPU here for legacy handshake, otherwise the vbios
+        * will not be prepared by host for this VF */
+       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
-                       return -EAGAIN;
+                       return r;
        }
 
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1763,6 +1825,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                }
                /* get the vbios after the asic_funcs are set up */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+                       /* skip vbios handling for new handshake */
+                       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
+                               continue;
+
                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;
@@ -1889,6 +1955,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                return r;
 
+       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
+               r = amdgpu_virt_request_full_gpu(adev, true);
+               if (r)
+                       return -EAGAIN;
+       }
+
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -1975,6 +2047,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                amdgpu_xgmi_add_device(adev);
        amdgpu_amdkfd_device_init(adev);
 
+       amdgpu_fru_get_product_info(adev);
+
 init_failed:
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_release_full_gpu(adev, true);
@@ -2008,8 +2082,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
  */
 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
 {
-       return !!memcmp(adev->gart.ptr, adev->reset_magic,
-                       AMDGPU_RESET_MAGIC_NUM);
+       if (memcmp(adev->gart.ptr, adev->reset_magic,
+                       AMDGPU_RESET_MAGIC_NUM))
+               return true;
+
+       if (!adev->in_gpu_reset)
+               return false;
+
+       /*
+        * For all ASICs with baco/mode1 reset, the VRAM is
+        * always assumed to be lost.
+        */
+       switch (amdgpu_asic_reset_method(adev)) {
+       case AMD_RESET_METHOD_BACO:
+       case AMD_RESET_METHOD_MODE1:
+               return true;
+       default:
+               return false;
+       }
 }
 
 /**
@@ -2155,6 +2245,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.late_initialized = true;
        }
 
+       amdgpu_ras_set_error_query_ready(adev, true);
+
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 
@@ -2187,7 +2279,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
                                if (gpu_instance->adev->flags & AMD_IS_APU)
                                        continue;
 
-                               r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+                               r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+                                               AMDGPU_XGMI_PSTATE_MIN);
                                if (r) {
                                        DRM_ERROR("pstate setting failed (%d).\n", r);
                                        break;
@@ -2340,6 +2433,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -2767,12 +2862,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
         * By default timeout for non compute jobs is 10000.
         * And there is no timeout enforced on compute jobs.
         * In SR-IOV or passthrough mode, timeout for compute
-        * jobs are 10000 by default.
+        * jobs are 60000 by default.
         */
        adev->gfx_timeout = msecs_to_jiffies(10000);
        adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
        if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
-               adev->compute_timeout = adev->gfx_timeout;
+               adev->compute_timeout =  msecs_to_jiffies(60000);
        else
                adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
 
@@ -2823,6 +2918,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
        return ret;
 }
 
+static const struct attribute *amdgpu_dev_attributes[] = {
+       &dev_attr_product_name.attr,
+       &dev_attr_product_number.attr,
+       &dev_attr_serial_number.attr,
+       &dev_attr_pcie_replay_count.attr,
+       NULL
+};
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2924,9 +3027,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
-       INIT_LIST_HEAD(&adev->ring_lru_list);
-       spin_lock_init(&adev->ring_lru_list_lock);
-
        INIT_DELAYED_WORK(&adev->delayed_init_work,
                          amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -2935,7 +3035,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
 
        adev->gfx.gfx_off_req_count = 1;
-       adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0;
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
@@ -2984,18 +3084,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
                adev->enable_mes = true;
 
-       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
-               r = amdgpu_discovery_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "amdgpu_discovery_init failed\n");
-                       return r;
-               }
-       }
-
-       /* early init functions */
-       r = amdgpu_device_ip_early_init(adev);
-       if (r)
-               return r;
+       /* detect hw virtualization here */
+       amdgpu_detect_virtualization(adev);
 
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
@@ -3003,6 +3093,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                return r;
        }
 
+       /* early init functions */
+       r = amdgpu_device_ip_early_init(adev);
+       if (r)
+               return r;
+
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
 
@@ -3109,14 +3204,13 @@ fence_driver_init:
                goto failed;
        }
 
-       DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+       dev_info(adev->dev,
+               "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
                        adev->gfx.config.max_shader_engines,
                        adev->gfx.config.max_sh_per_se,
                        adev->gfx.config.max_cu_per_sh,
                        adev->gfx.cu_info.number);
 
-       amdgpu_ctx_init_sched(adev);
-
        adev->accel_working = true;
 
        amdgpu_vm_check_compute_bug(adev);
@@ -3181,9 +3275,9 @@ fence_driver_init:
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-       r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+       r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (r) {
-               dev_err(adev->dev, "Could not create pcie_replay_count");
+               dev_err(adev->dev, "Could not create amdgpu device attr\n");
                return r;
        }
 
@@ -3266,9 +3360,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        adev->rmmio = NULL;
        amdgpu_device_doorbell_fini(adev);
 
-       device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
        if (adev->ucode_sysfs_en)
                amdgpu_ucode_sysfs_fini(adev);
+
+       sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (IS_ENABLED(CONFIG_PERF_EVENTS))
                amdgpu_pmu_fini(adev);
        if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@@ -3354,15 +3449,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                }
        }
 
-       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
-       amdgpu_amdkfd_suspend(adev, !fbcon);
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
+       amdgpu_amdkfd_suspend(adev, !fbcon);
+
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
@@ -3739,6 +3831,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       amdgpu_amdkfd_pre_reset(adev);
+
        /* Resume IP prior to SMC */
        r = amdgpu_device_ip_reinit_early_sriov(adev);
        if (r)
@@ -3833,6 +3927,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        int i, r = 0;
        bool need_full_reset  = *need_full_reset_arg;
 
+       amdgpu_debugfs_wait_dump(adev);
+
        /* block all schedulers and reset given job's ring */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
@@ -4037,6 +4133,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
        mutex_unlock(&adev->lock_reset);
 }
 
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (p) {
+               pm_runtime_enable(&(p->dev));
+               pm_runtime_resume(&(p->dev));
+       }
+}
+
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+       enum amd_reset_method reset_method;
+       struct pci_dev *p = NULL;
+       u64 expires;
+
+       /*
+        * For now, only BACO and mode1 reset are confirmed
+        * to suffer the audio issue without proper suspended.
+        */
+       reset_method = amdgpu_asic_reset_method(adev);
+       if ((reset_method != AMD_RESET_METHOD_BACO) &&
+            (reset_method != AMD_RESET_METHOD_MODE1))
+               return -EINVAL;
+
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return -ENODEV;
+
+       expires = pm_runtime_autosuspend_expiration(&(p->dev));
+       if (!expires)
+               /*
+                * If we cannot get the audio device autosuspend delay,
+                * a fixed 4S interval will be used. Considering 3S is
+                * the audio controller default autosuspend delay setting.
+                * 4S used here is guaranteed to cover that.
+                */
+               expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+       while (!pm_runtime_status_suspended(&(p->dev))) {
+               if (!pm_runtime_suspend(&(p->dev)))
+                       break;
+
+               if (expires < ktime_get_mono_fast_ns()) {
+                       dev_warn(adev->dev, "failed to suspend display audio\n");
+                       /* TODO: abort the succeeding gpu reset? */
+                       return -ETIMEDOUT;
+               }
+       }
+
+       pm_runtime_disable(&(p->dev));
+
+       return 0;
+}
+
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -4052,7 +4206,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
-       bool need_full_reset, job_signaled;
+       bool need_full_reset = false;
+       bool job_signaled = false;
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
@@ -4060,6 +4215,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        bool use_baco =
                (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
                true : false;
+       bool audio_suspended = false;
 
        /*
         * Flush RAM to disk so that after reboot
@@ -4073,16 +4229,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                emergency_restart();
        }
 
-       need_full_reset = job_signaled = false;
-       INIT_LIST_HEAD(&device_list);
-
        dev_info(adev->dev, "GPU %s begin!\n",
                (in_ras_intr && !use_baco) ? "jobs stop":"reset");
 
-       cancel_delayed_work_sync(&adev->delayed_init_work);
-
-       hive = amdgpu_get_xgmi_hive(adev, false);
-
        /*
         * Here we trylock to avoid chain of resets executing from
         * either trigger by jobs on different adevs in XGMI hive or jobs on
@@ -4090,39 +4239,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         * We always reset all schedulers for device and all devices for XGMI
         * hive so that should take care of them too.
         */
-
+       hive = amdgpu_get_xgmi_hive(adev, true);
        if (hive && !mutex_trylock(&hive->reset_lock)) {
                DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
                          job ? job->base.id : -1, hive->hive_id);
+               mutex_unlock(&hive->hive_lock);
                return 0;
        }
 
-       /* Start with adev pre asic reset first for soft reset check.*/
-       if (!amdgpu_device_lock_adev(adev, !hive)) {
-               DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
-                         job ? job->base.id : -1);
-               return 0;
-       }
-
-       /* Block kfd: SRIOV would do it separately */
-       if (!amdgpu_sriov_vf(adev))
-                amdgpu_amdkfd_pre_reset(adev);
-
-       /* Build list of devices to reset */
-       if  (adev->gmc.xgmi.num_physical_nodes > 1) {
-               if (!hive) {
-                       /*unlock kfd: SRIOV would do it separately */
-                       if (!amdgpu_sriov_vf(adev))
-                               amdgpu_amdkfd_post_reset(adev);
-                       amdgpu_device_unlock_adev(adev);
+       /*
+        * Build list of devices to reset.
+        * In case we are in XGMI hive mode, resort the device list
+        * to put adev in the 1st position.
+        */
+       INIT_LIST_HEAD(&device_list);
+       if (adev->gmc.xgmi.num_physical_nodes > 1) {
+               if (!hive)
                        return -ENODEV;
-               }
-
-               /*
-                * In case we are in XGMI hive mode device reset is done for all the
-                * nodes in the hive to retrain all XGMI links and hence the reset
-                * sequence is executed in loop on all nodes.
-                */
+               if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
+                       list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
                device_list_handle = &hive->device_list;
        } else {
                list_add_tail(&adev->gmc.xgmi.head, &device_list);
@@ -4131,19 +4266,40 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        /* block all schedulers and reset given job's ring */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-               if (tmp_adev != adev) {
-                       amdgpu_device_lock_adev(tmp_adev, false);
-                       if (!amdgpu_sriov_vf(tmp_adev))
-                                       amdgpu_amdkfd_pre_reset(tmp_adev);
+               if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
+                       DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+                                 job ? job->base.id : -1);
+                       mutex_unlock(&hive->hive_lock);
+                       return 0;
                }
 
+               /*
+                * Try to put the audio codec into suspend state
+                * before gpu reset started.
+                *
+                * Due to the power domain of the graphics device
+                * is shared with AZ power domain. Without this,
+                * we may change the audio hardware from behind
+                * the audio driver's back. That will trigger
+                * some audio codec errors.
+                */
+               if (!amdgpu_device_suspend_display_audio(tmp_adev))
+                       audio_suspended = true;
+
+               amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+               cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+               if (!amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_pre_reset(tmp_adev);
+
                /*
                 * Mark these ASICs to be reseted as untracked first
                 * And add them back after reset completed
                 */
                amdgpu_unregister_gpu_instance(tmp_adev);
 
-               amdgpu_fbdev_set_suspend(adev, 1);
+               amdgpu_fbdev_set_suspend(tmp_adev, 1);
 
                /* disable ras on ALL IPs */
                if (!(in_ras_intr && !use_baco) &&
@@ -4163,7 +4319,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                }
        }
 
-
        if (in_ras_intr && !use_baco)
                goto skip_sched_resume;
 
@@ -4174,30 +4329,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         * job->base holds a reference to parent fence
         */
        if (job && job->base.s_fence->parent &&
-           dma_fence_is_signaled(job->base.s_fence->parent))
+           dma_fence_is_signaled(job->base.s_fence->parent)) {
                job_signaled = true;
-
-       if (job_signaled) {
                dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
                goto skip_hw_reset;
        }
 
-
-       /* Guilty job will be freed after this*/
-       r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
-       if (r) {
-               /*TODO Should we stop ?*/
-               DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
-                         r, adev->ddev->unique);
-               adev->asic_reset_res = r;
-       }
-
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
-               if (tmp_adev == adev)
-                       continue;
-
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
                                                 &need_full_reset);
@@ -4259,11 +4398,15 @@ skip_sched_resume:
                /*unlock kfd: SRIOV would do it separately */
                if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
                        amdgpu_amdkfd_post_reset(tmp_adev);
+               if (audio_suspended)
+                       amdgpu_device_resume_display_audio(tmp_adev);
                amdgpu_device_unlock_adev(tmp_adev);
        }
 
-       if (hive)
+       if (hive) {
                mutex_unlock(&hive->reset_lock);
+               mutex_unlock(&hive->hive_lock);
+       }
 
        if (r)
                dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
index 057f6ea645d7eb12384007a96365002ef5e0aa17..61a26c15c8dd50398b4dc0909372863b15fc1782 100644 (file)
@@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
        uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
        void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
                         uint32_t ficadl_val, uint32_t ficadh_val);
-       uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
-                                      uint32_t df_inst);
-       uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_df {
index 27d8ae19a7a40ebd27489ab08546eac76133fe87..b5d6274952a5e664543ca363e2133789ea0fa4cf 100644 (file)
@@ -23,9 +23,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_discovery.h"
-#include "soc15_common.h"
 #include "soc15_hw_ip.h"
-#include "nbio/nbio_2_3_offset.h"
 #include "discovery.h"
 
 #define mmRCC_CONFIG_MEMSIZE   0xde3
@@ -135,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
 static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
 {
        uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-       uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+       uint64_t pos = vram_size - adev->discovery_tmr_size;
 
-       amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+       amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+                                 adev->discovery_tmr_size, false);
        return 0;
 }
 
@@ -158,7 +157,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
        return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
 }
 
-int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
 {
        struct table_info *info;
        struct binary_header *bhdr;
@@ -169,17 +168,18 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        uint16_t checksum;
        int r;
 
-       adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
-       if (!adev->discovery)
+       adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
+       adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
+       if (!adev->discovery_bin)
                return -ENOMEM;
 
-       r = amdgpu_discovery_read_binary(adev, adev->discovery);
+       r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
        if (r) {
                DRM_ERROR("failed to read ip discovery binary\n");
                goto out;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
+       bhdr = (struct binary_header *)adev->discovery_bin;
 
        if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
                DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        size = bhdr->binary_size - offset;
        checksum = bhdr->binary_checksum;
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              size, checksum)) {
                DRM_ERROR("invalid ip discovery binary checksum\n");
                r = -EINVAL;
@@ -202,7 +202,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        info = &bhdr->table_list[IP_DISCOVERY];
        offset = le16_to_cpu(info->offset);
        checksum = le16_to_cpu(info->checksum);
-       ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
 
        if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
                DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
                goto out;
        }
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              ihdr->size, checksum)) {
                DRM_ERROR("invalid ip discovery data table checksum\n");
                r = -EINVAL;
@@ -220,9 +220,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        info = &bhdr->table_list[GC];
        offset = le16_to_cpu(info->offset);
        checksum = le16_to_cpu(info->checksum);
-       ghdr = (struct gpu_info_header *)(adev->discovery + offset);
+       ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              ghdr->size, checksum)) {
                DRM_ERROR("invalid gc data table checksum\n");
                r = -EINVAL;
@@ -232,16 +232,16 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        return 0;
 
 out:
-       kfree(adev->discovery);
-       adev->discovery = NULL;
+       kfree(adev->discovery_bin);
+       adev->discovery_bin = NULL;
 
        return r;
 }
 
 void amdgpu_discovery_fini(struct amdgpu_device *adev)
 {
-       kfree(adev->discovery);
-       adev->discovery = NULL;
+       kfree(adev->discovery_bin);
+       adev->discovery_bin = NULL;
 }
 
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -257,14 +257,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
        uint8_t num_base_address;
        int hw_ip;
        int i, j, k;
+       int r;
 
-       if (!adev->discovery) {
-               DRM_ERROR("ip discovery uninitialized\n");
-               return -EINVAL;
+       r = amdgpu_discovery_init(adev);
+       if (r) {
+               DRM_ERROR("amdgpu_discovery_init failed\n");
+               return r;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       ihdr = (struct ip_discovery_header *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
        num_dies = le16_to_cpu(ihdr->num_dies);
 
@@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 
        for (i = 0; i < num_dies; i++) {
                die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-               dhdr = (struct die_header *)(adev->discovery + die_offset);
+               dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
                num_ips = le16_to_cpu(dhdr->num_ips);
                ip_offset = die_offset + sizeof(*dhdr);
 
@@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                le16_to_cpu(dhdr->die_id), num_ips);
 
                for (j = 0; j < num_ips; j++) {
-                       ip = (struct ip *)(adev->discovery + ip_offset);
+                       ip = (struct ip *)(adev->discovery_bin + ip_offset);
                        num_base_address = ip->num_base_address;
 
                        DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
        uint16_t num_ips;
        int i, j;
 
-       if (!adev->discovery) {
+       if (!adev->discovery_bin) {
                DRM_ERROR("ip discovery uninitialized\n");
                return -EINVAL;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       ihdr = (struct ip_discovery_header *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
        num_dies = le16_to_cpu(ihdr->num_dies);
 
        for (i = 0; i < num_dies; i++) {
                die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-               dhdr = (struct die_header *)(adev->discovery + die_offset);
+               dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
                num_ips = le16_to_cpu(dhdr->num_ips);
                ip_offset = die_offset + sizeof(*dhdr);
 
                for (j = 0; j < num_ips; j++) {
-                       ip = (struct ip *)(adev->discovery + ip_offset);
+                       ip = (struct ip *)(adev->discovery_bin + ip_offset);
 
                        if (le16_to_cpu(ip->hw_id) == hw_id) {
                                if (major)
@@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
        struct binary_header *bhdr;
        struct gc_info_v1_0 *gc_info;
 
-       if (!adev->discovery) {
+       if (!adev->discovery_bin) {
                DRM_ERROR("ip discovery uninitialized\n");
                return -EINVAL;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       gc_info = (struct gc_info_v1_0 *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[GC].offset));
 
        adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
index ba78e15d9b055b3ecca1e9aba31d2fb268507c87..d50d597c45ed48315461680d6ba2832cb0822d28 100644 (file)
@@ -26,7 +26,6 @@
 
 #define DISCOVERY_TMR_SIZE  (64 << 10)
 
-int amdgpu_discovery_init(struct amdgpu_device *adev);
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
index ffeb20f11c07caf54eeb5632c6b85b229c221190..43d8ed7dbd0018d66192e43f487f5aadc814fc4c 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
 
 /**
  * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;
 
+       if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+               attach->peer2peer = false;
+
        if (attach->dev->driver == adev->dev->driver)
                return 0;
 
@@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
        struct dma_buf *dma_buf = attach->dmabuf;
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct sg_table *sgt;
        long r;
 
        if (!bo->pin_count) {
-               /* move buffer into GTT */
+               /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
+               unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
 
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+                   attach->peer2peer) {
+                       bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       domains |= AMDGPU_GEM_DOMAIN_VRAM;
+               }
+               amdgpu_bo_placement_from_domain(bo, domains);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
                        return ERR_PTR(r);
@@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
                return ERR_PTR(-EBUSY);
        }
 
-       sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
-       if (IS_ERR(sgt))
-               return sgt;
-
-       if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC))
-               goto error_free;
+       switch (bo->tbo.mem.mem_type) {
+       case TTM_PL_TT:
+               sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+                                           bo->tbo.num_pages);
+               if (IS_ERR(sgt))
+                       return sgt;
+
+               if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+                                     DMA_ATTR_SKIP_CPU_SYNC))
+                       goto error_free;
+               break;
+
+       case TTM_PL_VRAM:
+               r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+                                             dir, &sgt);
+               if (r)
+                       return ERR_PTR(r);
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
 
        return sgt;
 
 error_free:
        sg_free_table(sgt);
        kfree(sgt);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-EBUSY);
 }
 
 /**
@@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)
 {
-       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-       sg_free_table(sgt);
-       kfree(sgt);
+       struct dma_buf *dma_buf = attach->dmabuf;
+       struct drm_gem_object *obj = dma_buf->priv;
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       if (sgt->sgl->page_link) {
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+               sg_free_table(sgt);
+               kfree(sgt);
+       } else {
+               amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+       }
 }
 
 /**
@@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 }
 
 static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+       .allow_peer2peer = true,
        .move_notify = amdgpu_dma_buf_move_notify
 };
 
index ba1bb95a3cf93c7864cb18a6f00b898999836980..d2a105e3bf7cccd98e70681b31b32f028d0e9367 100644 (file)
@@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
@@ -1188,3 +1188,13 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 
        return ret;
 }
+
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+{
+       struct smu_context *smu = &adev->smu;
+
+       if (is_support_sw_smu(adev))
+               return smu_allow_xgmi_power_down(smu, en);
+
+       return 0;
+}
\ No newline at end of file
index 936d85aa0fbc5fd4387793e6d851c9cddd45c028..956f6c71067081da2d217dbe700cc2b4319a3d09 100644 (file)
@@ -538,4 +538,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
                             uint32_t cstate);
 
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+
 #endif
index 8ea86ffdea0d8dfbcf860c67bc1cfc08d26885a8..a0e5b54b6e470aa330d068c244e59a34cbc60555 100644 (file)
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
  * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       36
+#define KMS_DRIVER_MINOR       38
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -144,6 +146,7 @@ int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
 int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = 0;
 
 struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -687,13 +690,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
 
 /**
  * DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what firmware version
+ * check says. Default value: false (rely on MEC2 firmware version check).
  */
 bool hws_gws_support;
 module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
 
 /**
   * DOC: queue_preemption_timeout_ms (int)
@@ -728,6 +730,16 @@ uint amdgpu_dm_abm_level = 0;
 MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
 module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
 
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value: 0 (off).  TODO: change to auto till it is completed.
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1163,14 +1175,6 @@ static int amdgpu_pmops_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-       /* GPU comes up enabled by the bios on resume */
-       if (amdgpu_device_supports_boco(drm_dev) ||
-           amdgpu_device_supports_baco(drm_dev)) {
-               pm_runtime_disable(dev);
-               pm_runtime_set_active(dev);
-               pm_runtime_enable(dev);
-       }
-
        return amdgpu_device_resume(drm_dev, true);
 }
 
@@ -1180,7 +1184,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
        struct amdgpu_device *adev = drm_dev->dev_private;
        int r;
 
+       adev->in_hibernate = true;
        r = amdgpu_device_suspend(drm_dev, true);
+       adev->in_hibernate = false;
        if (r)
                return r;
        return amdgpu_asic_reset(adev);
index 9ae7b61f696a244c7182b06843330728ec455e47..25ddb482466a7f034196c6241e86d4ae16b232b4 100644 (file)
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
        u32 cpp;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS     |
-                              AMDGPU_GEM_CREATE_VRAM_CLEARED        |
-                              AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+                              AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
        info = drm_get_format_info(adev->ddev, mode_cmd);
        cpp = info->cpp[0];
index 7531527067dfb164b71468379fc79647d6b552de..d878fe7fee51cc6c44b2475800da30b9c7e97619 100644 (file)
@@ -192,14 +192,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
  * Used For polling fence.
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+                             uint32_t timeout)
 {
        uint32_t seq;
+       signed long r;
 
        if (!s)
                return -EINVAL;
 
        seq = ++ring->fence_drv.sync_seq;
+       r = amdgpu_fence_wait_polling(ring,
+                                     seq - ring->fence_drv.num_fences_mask,
+                                     timeout);
+       if (r < 1)
+               return -ETIMEDOUT;
+
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644 (file)
index 0000000..815c072
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+
+#define I2C_PRODUCT_INFO_ADDR          0xAC
+#define I2C_PRODUCT_INFO_ADDR_SIZE     0x2
+#define I2C_PRODUCT_INFO_OFFSET                0xC0
+
+bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+{
+       /* TODO: Gaming SKUs don't have the FRU EEPROM.
+        * Use this hack to address hangs on modprobe on gaming SKUs
+        * until a proper solution can be implemented by only supporting
+        * the explicit chip IDs for VG20 Server cards
+        *
+        * TODO: Add list of supported Arcturus DIDs once confirmed
+        */
+       if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
+           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
+           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
+               return true;
+       return false;
+}
+
+int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+                          unsigned char *buff)
+{
+       int ret, size;
+       struct i2c_msg msg = {
+                       .addr   = I2C_PRODUCT_INFO_ADDR,
+                       .flags  = I2C_M_RD,
+                       .buf    = buff,
+       };
+       buff[0] = 0;
+       buff[1] = addrptr;
+       msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
+       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+       if (ret < 1) {
+               DRM_WARN("FRU: Failed to get size field");
+               return ret;
+       }
+
+       /* The size returned by the i2c requires subtraction of 0xC0 since the
+        * size apparently always reports as 0xC0+actual size.
+        */
+       size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
+       /* Add 1 since address field was 1 byte */
+       buff[1] = addrptr + 1;
+
+       msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
+       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+       if (ret < 1) {
+               DRM_WARN("FRU: Failed to get data field");
+               return ret;
+       }
+
+       return size;
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+       unsigned char buff[34];
+       int addrptr = 0, size = 0;
+
+       if (!is_fru_eeprom_supported(adev))
+               return 0;
+
+       /* If algo exists, it means that the i2c_adapter's initialized */
+       if (!adev->pm.smu_i2c.algo) {
+               DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+               return 0;
+       }
+
+       /* There's a lot of repetition here. This is due to the FRU having
+        * variable-length fields. To get the information, we have to find the
+        * size of each field, and then keep reading along and reading along
+        * until we get all of the data that we want. We use addrptr to track
+        * the address as we go
+        */
+
+       /* The first fields are all of size 1-byte, from 0-7 are offsets that
+        * contain information that isn't useful to us.
+        * Bytes 8-a are all 1-byte and refer to the size of the entire struct,
+        * and the language field, so just start from 0xb, manufacturer size
+        */
+       addrptr = 0xb;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+               return size;
+       }
+
+       /* Increment the addrptr by the size of the field, and 1 due to the
+        * size field being 1 byte. This pattern continues below.
+        */
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+               return size;
+       }
+
+       /* Product name should only be 32 characters. Any more,
+        * and something could be wrong. Cap it at 32 to be safe
+        */
+       if (size > 32) {
+               DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
+               size = 32;
+       }
+       /* Start at 2 due to buff using fields 0 and 1 for the address */
+       memcpy(adev->product_name, &buff[2], size);
+       adev->product_name[size] = '\0';
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+               return size;
+       }
+
+       /* Product number should only be 16 characters. Any more,
+        * and something could be wrong. Cap it at 16 to be safe
+        */
+       if (size > 16) {
+               DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+               size = 16;
+       }
+       memcpy(adev->product_number, &buff[2], size);
+       adev->product_number[size] = '\0';
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+               return size;
+       }
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+               return size;
+       }
+
+       /* Serial number should only be 16 characters. Any more,
+        * and something could be wrong. Cap it at 16 to be safe
+        */
+       if (size > 16) {
+               DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+               size = 16;
+       }
+       memcpy(adev->serial, &buff[2], size);
+       adev->serial[size] = '\0';
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
new file mode 100644 (file)
index 0000000..968115c
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PRODINFO_H__
+#define __AMDGPU_PRODINFO_H__
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
+
+#endif  // __AMDGPU_PRODINFO_H__
index 4277125a79ee45ef61a8f9ad42f903e9b0a0fe7f..4ed9958af94e3b8d01416f7bfae93ef2e26244a5 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_debugfs.h>
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
+       struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
-       int r;
+       long r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
-       tv.num_shared = 1;
+       tv.num_shared = 2;
        list_add(&tv.head, &list);
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%d)\n", r);
+                       "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
-       if (bo_va && --bo_va->ref_count == 0) {
-               amdgpu_vm_bo_rmv(adev, bo_va);
-
-               if (amdgpu_vm_ready(vm)) {
-                       struct dma_fence *fence = NULL;
+       if (!bo_va || --bo_va->ref_count)
+               goto out_unlock;
 
-                       r = amdgpu_vm_clear_freed(adev, vm, &fence);
-                       if (unlikely(r)) {
-                               dev_err(adev->dev, "failed to clear page "
-                                       "tables on GEM object close (%d)\n", r);
-                       }
+       amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!amdgpu_vm_ready(vm))
+               goto out_unlock;
 
-                       if (fence) {
-                               amdgpu_bo_fence(bo, fence, true);
-                               dma_fence_put(fence);
-                       }
-               }
+       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       if (fence) {
+               amdgpu_bo_fence(bo, fence, true);
+               fence = NULL;
        }
+
+       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (r || !fence)
+               goto out_unlock;
+
+       amdgpu_bo_fence(bo, fence, true);
+       dma_fence_put(fence);
+
+out_unlock:
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
 }
 
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+                     AMDGPU_GEM_CREATE_ENCRYPTED))
 
                return -EINVAL;
 
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;
 
+       if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+               DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+               return -EINVAL;
+       }
+
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
        attachment = READ_ONCE(bo->tbo.base.import_attach);
 
        if (attachment)
-               seq_printf(m, " imported from %p", dma_buf);
+               seq_printf(m, " imported from %p%s", dma_buf,
+                          attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);
 
index 6b9c9193cdfa72df9434fd16ccaf4367cfa56fb9..d612033a23ac638d8b6e0a56cdb48cb63eeb62a8 100644 (file)
@@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
        return bit;
 }
 
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                                 int *mec, int *pipe, int *queue)
 {
        *queue = bit % adev->gfx.mec.num_queue_per_pipe;
@@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;
 
-               amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+               amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
 
                /*
                 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
        spin_lock_init(&kiq->ring_lock);
 
-       r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
-       if (r)
-               return r;
-
        ring->adev = NULL;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
@@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
                return r;
 
        ring->eop_gpu_addr = kiq->eop_gpu_addr;
+       ring->no_scheduler = true;
        sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
        r = amdgpu_ring_init(adev, ring, 1024,
-                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
 
@@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
 {
-       amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
        amdgpu_ring_fini(ring);
 }
 
@@ -488,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
        return amdgpu_ring_test_helper(kiq_ring);
 }
 
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+                                       int queue_bit)
+{
+       int mec, pipe, queue;
+       int set_resource_bit = 0;
+
+       amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+       set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
+
+       return set_resource_bit;
+}
+
 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -510,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
                        break;
                }
 
-               queue_mask |= (1ull << i);
+               queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
        }
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
@@ -670,16 +680,23 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
        signed long r, cnt = 0;
        unsigned long flags;
-       uint32_t seq;
+       uint32_t seq, reg_val_offs = 0, value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
+       if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+               pr_err("critical bug! too many kiq readers\n");
+               goto failed_unlock;
+       }
        amdgpu_ring_alloc(ring, 32);
-       amdgpu_ring_emit_rreg(ring, reg);
-       amdgpu_fence_emit_polling(ring, &seq);
+       amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -705,9 +722,18 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;
 
-       return adev->wb.wb[kiq->reg_val_offs];
+       mb();
+       value = adev->wb.wb[reg_val_offs];
+       amdgpu_device_wb_free(adev, reg_val_offs);
+       return value;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+failed_unlock:
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_read:
+       if (reg_val_offs)
+               amdgpu_device_wb_free(adev, reg_val_offs);
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
 }
@@ -725,7 +751,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
-       amdgpu_fence_emit_polling(ring, &seq);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -754,6 +783,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        return;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
 }
index 5825692d07e42a33a31004bebd6b7ebcb4936b09..d43c11671a384ccc1507cd8b63060e86f43622d8 100644 (file)
@@ -103,7 +103,6 @@ struct amdgpu_kiq {
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
        const struct kiq_pm4_funcs *pmf;
-       uint32_t                        reg_val_offs;
 };
 
 /*
@@ -286,13 +285,8 @@ struct amdgpu_gfx {
        bool                            me_fw_write_wait;
        bool                            cp_fw_write_wait;
        struct amdgpu_ring              gfx_ring[AMDGPU_MAX_GFX_RINGS];
-       struct drm_gpu_scheduler        *gfx_sched[AMDGPU_MAX_GFX_RINGS];
-       uint32_t                        num_gfx_sched;
        unsigned                        num_gfx_rings;
        struct amdgpu_ring              compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
-       struct drm_gpu_scheduler        **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
-       struct drm_gpu_scheduler        *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
-       uint32_t                        num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
        unsigned                        num_compute_rings;
        struct amdgpu_irq_src           eop_irq;
        struct amdgpu_irq_src           priv_reg_irq;
@@ -370,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
 
 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
                                int pipe, int queue);
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                                 int *mec, int *pipe, int *queue);
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
                                     int pipe, int queue);
index 5884ab590486e6f941d67f931b34c4f0d2f9b01c..acabb57aa8af9e8007e49dc17b53071ed40f65eb 100644 (file)
@@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 /**
  * amdgpu_gmc_vram_location - try to find VRAM location
  *
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  * @base: base address at which to put VRAM
  *
  * Function will try to place VRAM at base address provided
@@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
 /**
  * amdgpu_gmc_gart_location - try to find GART location
  *
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  *
  * Function will place try to place GART before or after VRAM.
  *
@@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 
 /**
  * amdgpu_gmc_agp_location - try to find AGP location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  *
  * Function will place try to find a place for the AGP BAR in the MC address
  * space.
@@ -373,3 +373,38 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 
        return 0;
 }
+
+/**
+ * amdgpu_tmz_set -- check and set if a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set if an the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_RAVEN:
+       case CHIP_RENOIR:
+       case CHIP_NAVI10:
+       case CHIP_NAVI14:
+       case CHIP_NAVI12:
+               /* Don't enable it by default yet.
+                */
+               if (amdgpu_tmz < 1) {
+                       adev->gmc.tmz_enabled = false;
+                       dev_info(adev->dev,
+                                "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+               } else {
+                       adev->gmc.tmz_enabled = true;
+                       dev_info(adev->dev,
+                                "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+               }
+               break;
+       default:
+               adev->gmc.tmz_enabled = false;
+               dev_warn(adev->dev,
+                        "Trusted Memory Zone (TMZ) feature not supported\n");
+               break;
+       }
+}
index 7546da0cc70c7019c94f58fb0ee66debcdc93a22..2bd9423c1dabbbd3106e98ff4b3c823b994e35bd 100644 (file)
@@ -213,6 +213,8 @@ struct amdgpu_gmc {
        } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
        uint64_t                last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
 
+       bool tmz_enabled;
+
        const struct amdgpu_gmc_funcs   *gmc_funcs;
 
        struct amdgpu_xgmi xgmi;
@@ -276,4 +278,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
 #endif
index ccbd7acfc4cb1be94259e19528cc271823305010..b91853fd66d375ee8fb13b7424e1e4d1ff1a9df7 100644 (file)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib)
+                 unsigned size, enum amdgpu_ib_pool_type pool_type,
+                 struct amdgpu_ib *ib)
 {
        int r;
 
        if (size) {
-               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+               r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
                                      &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -131,6 +132,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned fence_flags = 0;
+       bool secure;
 
        unsigned i;
        int r = 0;
@@ -159,6 +161,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return -EINVAL;
        }
 
+       if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+           (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+               dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+               return -EINVAL;
+       }
+
        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                ring->funcs->emit_ib_size;
 
@@ -181,6 +189,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                dma_fence_put(tmp);
        }
 
+       if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+               ring->funcs->emit_mem_sync(ring);
+
        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);
 
@@ -215,6 +226,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                amdgpu_ring_emit_cntxcntl(ring, status);
        }
 
+       /* Setup initial TMZiness and send it off.
+        */
+       secure = false;
+       if (job && ring->funcs->emit_frame_cntl) {
+               secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+               amdgpu_ring_emit_frame_cntl(ring, true, secure);
+       }
+
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
@@ -226,12 +245,20 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
+               if (job && ring->funcs->emit_frame_cntl) {
+                       if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+                               amdgpu_ring_emit_frame_cntl(ring, false, secure);
+                               secure = !secure;
+                               amdgpu_ring_emit_frame_cntl(ring, true, secure);
+                       }
+               }
+
                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }
 
-       if (ring->funcs->emit_tmz)
-               amdgpu_ring_emit_tmz(ring, false);
+       if (job && ring->funcs->emit_frame_cntl)
+               amdgpu_ring_emit_frame_cntl(ring, false, secure);
 
 #ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
@@ -280,22 +307,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-       int r;
+       unsigned size;
+       int r, i;
 
-       if (adev->ib_pool_ready) {
+       if (adev->ib_pool_ready)
                return 0;
-       }
-       r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
-                                     AMDGPU_IB_POOL_SIZE*64*1024,
-                                     AMDGPU_GPU_PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT);
-       if (r) {
-               return r;
-       }
 
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+               if (i == AMDGPU_IB_POOL_DIRECT)
+                       size = PAGE_SIZE * 2;
+               else
+                       size = AMDGPU_IB_POOL_SIZE;
+
+               r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+                                             size, AMDGPU_GPU_PAGE_SIZE,
+                                             AMDGPU_GEM_DOMAIN_GTT);
+               if (r)
+                       goto error;
+       }
        adev->ib_pool_ready = true;
 
        return 0;
+
+error:
+       while (i--)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       return r;
 }
 
 /**
@@ -308,10 +345,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
  */
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
-       if (adev->ib_pool_ready) {
-               amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
-               adev->ib_pool_ready = false;
-       }
+       int i;
+
+       if (!adev->ib_pool_ready)
+               return;
+
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       adev->ib_pool_ready = false;
 }
 
 /**
@@ -326,9 +367,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
  */
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
-       unsigned i;
-       int r, ret = 0;
        long tmo_gfx, tmo_mm;
+       int r, ret = 0;
+       unsigned i;
 
        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
@@ -406,10 +447,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
 
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+       seq_printf(m, "--------------------- DELAYED --------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+                                    m);
+       seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+                                    m);
+       seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
 
        return 0;
-
 }
 
 static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
index 3a67f6c046d4d4579cfb63bcfd20f4f4e35027db..fe92dcd94d4ae6af2c7a1b27ab8709d4cdea51de 100644 (file)
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
            !dma_fence_is_later(updates, (*id)->flushed_updates))
            updates = NULL;
 
-       if ((*id)->owner != vm->direct.fence_context ||
+       if ((*id)->owner != vm->immediate.fence_context ||
            job->vm_pd_addr != (*id)->pd_gpu_addr ||
            updates || !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                struct dma_fence *flushed;
 
                /* Check all the prerequisites to using this VMID */
-               if ((*id)->owner != vm->direct.fence_context)
+               if ((*id)->owner != vm->immediate.fence_context)
                        continue;
 
                if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        }
 
        id->pd_gpu_addr = job->vm_pd_addr;
-       id->owner = vm->direct.fence_context;
+       id->owner = vm->immediate.fence_context;
 
        if (job->vm_needs_flush) {
                dma_fence_put(id->last_flush);
index 5ed4227f304bd49050139505622cdf2673878e8a..0cc4c67f95f721f7671d5e46824b0b0d5e70e7fa 100644 (file)
@@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
                nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
                if (nvec > 0) {
                        adev->irq.msi_enabled = true;
-                       dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+                       dev_dbg(adev->dev, "using MSI/MSI-X.\n");
                }
        }
 
index 4981e443a88473050e27559e4f43acc1c5a13d58..47207188c5692ad068217793a957471e13c3a035 100644 (file)
@@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
+       struct amdgpu_device *adev = ring->adev;
 
        memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
@@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
-       if (amdgpu_device_should_recover_gpu(ring->adev))
+       if (amdgpu_device_should_recover_gpu(ring->adev)) {
                amdgpu_device_gpu_recover(ring->adev, job);
-       else
+       } else {
                drm_sched_suspend_timeout(&ring->sched);
+               if (amdgpu_sriov_vf(adev))
+                       adev->virt.tdr_debug = true;
+       }
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job)
+               enum amdgpu_ib_pool_type pool_type,
+               struct amdgpu_job **job)
 {
        int r;
 
@@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
        if (r)
                return r;
 
-       r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+       r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
 
@@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
 {
-       enum drm_sched_priority priority;
        int r;
 
        if (!f)
@@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
-       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
        return 0;
index 3f7b8433d17904fd8e535219108a47f0f54c9e3d..81caac9b958a4a52fcdd9d0a06890ed4b6514eaf 100644 (file)
@@ -38,6 +38,7 @@
 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
 
 struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
 
 struct amdgpu_job {
        struct drm_sched_job    base;
@@ -61,14 +62,12 @@ struct amdgpu_job {
        /* user fence handling */
        uint64_t                uf_addr;
        uint64_t                uf_sequence;
-
 };
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job);
-
+               enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
index 5727f00afc8e6503647256cb171473eb0670aeda..d31d65e6b0398eaf6c20121f815b8fc4a88a1226 100644 (file)
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
        const unsigned ib_size_dw = 16;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
index bd9ef9cc86deae6c7183a6ccab20a33dbb869cce..5131a0a1bc8aa90c8c9dde5482ff466ee0ce8977 100644 (file)
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
        uint8_t num_jpeg_inst;
        struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
        struct amdgpu_jpeg_reg internal;
-       struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
-       uint32_t num_jpeg_sched;
        unsigned harvest_config;
        struct delayed_work idle_work;
        enum amd_powergating_state cur_state;
index fd1dc3236ecae0a4c169677a39cc07fe43cb8d2a..682a514f17944c6a2edd72467c5f2a44caf4023e 100644 (file)
@@ -183,18 +183,18 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        /* Call ACPI methods: require modeset init
         * but failure is not fatal
         */
-       if (!r) {
-               acpi_status = amdgpu_acpi_init(adev);
-               if (acpi_status)
-                       dev_dbg(&dev->pdev->dev,
-                               "Error during ACPI methods call\n");
-       }
+
+       acpi_status = amdgpu_acpi_init(adev);
+       if (acpi_status)
+               dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
 
        if (adev->runpm) {
-               dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+               /* only need to skip on ATPX */
+               if (amdgpu_device_supports_boco(dev) &&
+                   !amdgpu_is_atpx_hybrid())
+                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-               pm_runtime_set_active(dev->dev);
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put_autosuspend(dev->dev);
index 919bd566ba3cfc20670da24a2647ae1425f1c4dd..edaac242ff85708a8f57635791c954c8792c0dcc 100644 (file)
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
                                      u32 *flags);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
        void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
        void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
index c687f5415b3f1d03610f4904a42748ece428f618..3d822eba9a5d708b438e7a9cf90dbf8982422f95 100644 (file)
@@ -753,7 +753,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
        return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
                                  amdgpu_bo_size(shadow), NULL, fence,
-                                 true, false);
+                                 true, false, false);
 }
 
 /**
index 5e39ecd8cc28d099f31ba23e3dcbfa8d9fae515d..7d41f7b9a340574c3422e1ef9a570a27b51351cd 100644 (file)
@@ -229,6 +229,17 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+       return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 
index abe94a55ecad4bf5fb489669a1fe7d5d2fccfc8c..b75362bf07425e3bddd6ef886f5bab2052288cc5 100644 (file)
@@ -154,9 +154,9 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
  *
  */
 
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -189,10 +189,10 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf,
-                                   size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf,
+                                         size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -294,9 +294,9 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
  *
  */
 
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
-                                               struct device_attribute *attr,
-                                                               char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+                                                           struct device_attribute *attr,
+                                                           char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -332,10 +332,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
                        "unknown");
 }
 
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
-                                                      struct device_attribute *attr,
-                                                      const char *buf,
-                                                      size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+                                                           struct device_attribute *attr,
+                                                           const char *buf,
+                                                           size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -444,8 +444,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                ret = smu_get_power_num_states(&adev->smu, &data);
                if (ret)
                        return ret;
-       } else if (adev->powerplay.pp_funcs->get_pp_num_states)
+       } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
                amdgpu_dpm_get_pp_num_states(adev, &data);
+       } else {
+               memset(&data, 0, sizeof(data));
+       }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -870,10 +873,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the corresponding bit from original ppfeature masks and input the
  * new ppfeature masks.
  */
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf,
-               size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf,
+                                     size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -914,9 +917,9 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
        return count;
 }
 
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -1660,9 +1663,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -1696,9 +1699,9 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -1787,57 +1790,174 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
        return 0;
 }
 
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
-                  amdgpu_get_dpm_forced_performance_level,
-                  amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_force_state,
-               amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_table,
-               amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_sclk,
-               amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_mclk,
-               amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_socclk,
-               amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_fclk,
-               amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_dcefclk,
-               amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_pcie,
-               amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_sclk_od,
-               amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_mclk_od,
-               amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_power_profile_mode,
-               amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_od_clk_voltage,
-               amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
-               amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
-               amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_feature_status,
-               amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+       AMDGPU_DEVICE_ATTR_RW(power_dpm_state,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RO(pp_num_states,                            ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(pp_cur_state,                             ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_force_state,                           ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_table,                                 ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,                               ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,                               ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,                    ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,                        ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,                         ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,                         ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(pcie_bw,                                  ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_features,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(unique_id,                                ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                              uint32_t mask)
+{
+       struct device_attribute *dev_attr = &attr->dev_attr;
+       const char *attr_name = dev_attr->attr.name;
+       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+       enum amd_asic_type asic_type = adev->asic_type;
+
+       if (!(attr->flags & mask)) {
+               attr->states = ATTR_STATE_UNSUPPORTED;
+               return 0;
+       }
+
+#define DEVICE_ATTR_IS(_name)  (!strcmp(attr_name, #_name))
+
+       if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+               if (asic_type <= CHIP_VEGA10)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+               if (asic_type <= CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+               if (asic_type < CHIP_VEGA20)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+               if (asic_type == CHIP_ARCTURUS)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+               attr->states = ATTR_STATE_UNSUPPORTED;
+               if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+                   (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+               if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pcie_bw)) {
+               /* PCIe Perf counters won't work on APU nodes */
+               if (adev->flags & AMD_IS_APU)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(unique_id)) {
+               if (!adev->unique_id)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_features)) {
+               if (adev->flags & AMD_IS_APU || asic_type <= CHIP_VEGA10)
+                       attr->states = ATTR_STATE_UNSUPPORTED;
+       }
+
+       if (asic_type == CHIP_ARCTURUS) {
+               /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+               if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+                   DEVICE_ATTR_IS(pp_dpm_socclk) ||
+                   DEVICE_ATTR_IS(pp_dpm_fclk)) {
+                       dev_attr->attr.mode &= ~S_IWUGO;
+                       dev_attr->store = NULL;
+               }
+       }
+
+#undef DEVICE_ATTR_IS
+
+       return 0;
+}
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+                                    struct amdgpu_device_attr *attr,
+                                    uint32_t mask)
+{
+       int ret = 0;
+       struct device_attribute *dev_attr = &attr->dev_attr;
+       const char *name = dev_attr->attr.name;
+       int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                          uint32_t mask) = default_attr_update;
+
+       BUG_ON(!attr);
+
+       attr_update = attr->attr_update ? attr_update : default_attr_update;
+
+       ret = attr_update(adev, attr, mask);
+       if (ret) {
+               dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+                       name, ret);
+               return ret;
+       }
+
+       /* the attr->states maybe changed after call attr->attr_update function */
+       if (attr->states == ATTR_STATE_UNSUPPORTED)
+               return 0;
+
+       ret = device_create_file(adev->dev, dev_attr);
+       if (ret) {
+               dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+                       name, ret);
+       }
+
+       attr->states = ATTR_STATE_SUPPORTED;
+
+       return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+       struct device_attribute *dev_attr = &attr->dev_attr;
+
+       if (attr->states == ATTR_STATE_UNSUPPORTED)
+               return;
+
+       device_remove_file(adev->dev, dev_attr);
+
+       attr->states = ATTR_STATE_UNSUPPORTED;
+}
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+                                           struct amdgpu_device_attr *attrs,
+                                           uint32_t counts,
+                                           uint32_t mask)
+{
+       int ret = 0;
+       uint32_t i = 0;
+
+       for (i = 0; i < counts; i++) {
+               ret = amdgpu_device_attr_create(adev, &attrs[i], mask);
+               if (ret)
+                       goto failed;
+       }
+
+       return 0;
+
+failed:
+       for (; i > 0; i--) {
+               amdgpu_device_attr_remove(adev, &attrs[i]);
+       }
+
+       return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+                                            struct amdgpu_device_attr *attrs,
+                                            uint32_t counts)
+{
+       uint32_t i = 0;
+
+       for (i = 0; i < counts; i++)
+               amdgpu_device_attr_remove(adev, &attrs[i]);
+}
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
@@ -3238,8 +3358,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret;
+       uint32_t mask = 0;
 
        if (adev->pm.sysfs_initialized)
                return 0;
@@ -3257,160 +3377,25 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                return ret;
        }
 
-       ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file for dpm state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-       if (ret) {
-               DRM_ERROR("failed to create device file for dpm state\n");
-               return ret;
-       }
-
-
-       ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_num_states\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_cur_state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_force_state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_table);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_table\n");
-               return ret;
-       }
-
-       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-               return ret;
-       }
-
-       /* Arcturus does not support standalone mclk/socclk/fclk level setting */
-       if (adev->asic_type == CHIP_ARCTURUS) {
-               dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_mclk.store = NULL;
-
-               dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_socclk.store = NULL;
-
-               dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_fclk.store = NULL;
+       switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+       case SRIOV_VF_MODE_ONE_VF:
+               mask = ATTR_FLAG_ONEVF;
+               break;
+       case SRIOV_VF_MODE_MULTI_VF:
+               mask = 0;
+               break;
+       case SRIOV_VF_MODE_BARE_METAL:
+       default:
+               mask = ATTR_FLAG_MASK_ALL;
+               break;
        }
 
-       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-               return ret;
-       }
-       if (adev->asic_type >= CHIP_VEGA10) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_socclk\n");
-                       return ret;
-               }
-               if (adev->asic_type != CHIP_ARCTURUS) {
-                       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-                       if (ret) {
-                               DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-                               return ret;
-                       }
-               }
-       }
-       if (adev->asic_type >= CHIP_VEGA20) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_fclk\n");
-                       return ret;
-               }
-       }
-       if (adev->asic_type != CHIP_ARCTURUS) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-                       return ret;
-               }
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_sclk_od\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_mclk_od\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev,
-                       &dev_attr_pp_power_profile_mode);
-       if (ret) {
-               DRM_ERROR("failed to create device file "
-                               "pp_power_profile_mode\n");
-               return ret;
-       }
-       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-           (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_pp_od_clk_voltage);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "pp_od_clk_voltage\n");
-                       return ret;
-               }
-       }
-       ret = device_create_file(adev->dev,
-                       &dev_attr_gpu_busy_percent);
-       if (ret) {
-               DRM_ERROR("failed to create device file "
-                               "gpu_busy_level\n");
-               return ret;
-       }
-       /* APU does not have its own dedicated memory */
-       if (!(adev->flags & AMD_IS_APU) &&
-            (adev->asic_type != CHIP_VEGA10)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_mem_busy_percent);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "mem_busy_percent\n");
-                       return ret;
-               }
-       }
-       /* PCIe Perf counters won't work on APU nodes */
-       if (!(adev->flags & AMD_IS_APU)) {
-               ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pcie_bw\n");
-                       return ret;
-               }
-       }
-       if (adev->unique_id)
-               ret = device_create_file(adev->dev, &dev_attr_unique_id);
-       if (ret) {
-               DRM_ERROR("failed to create device file unique_id\n");
+       ret = amdgpu_device_attr_create_groups(adev,
+                                              amdgpu_device_attrs,
+                                              ARRAY_SIZE(amdgpu_device_attrs),
+                                              mask);
+       if (ret)
                return ret;
-       }
-
-       if ((adev->asic_type >= CHIP_VEGA10) &&
-           !(adev->flags & AMD_IS_APU)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_pp_features);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "pp_features\n");
-                       return ret;
-               }
-       }
 
        adev->pm.sysfs_initialized = true;
 
@@ -3419,51 +3404,15 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 {
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
        if (adev->pm.dpm_enabled == 0)
                return;
 
        if (adev->pm.int_hwmon_dev)
                hwmon_device_unregister(adev->pm.int_hwmon_dev);
-       device_remove_file(adev->dev, &dev_attr_power_dpm_state);
-       device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
-       device_remove_file(adev->dev, &dev_attr_pp_num_states);
-       device_remove_file(adev->dev, &dev_attr_pp_cur_state);
-       device_remove_file(adev->dev, &dev_attr_pp_force_state);
-       device_remove_file(adev->dev, &dev_attr_pp_table);
-
-       device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-       device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-       if (adev->asic_type >= CHIP_VEGA10) {
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-               if (adev->asic_type != CHIP_ARCTURUS)
-                       device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-       }
-       if (adev->asic_type != CHIP_ARCTURUS)
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
-       if (adev->asic_type >= CHIP_VEGA20)
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
-       device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
-       device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
-       device_remove_file(adev->dev,
-                       &dev_attr_pp_power_profile_mode);
-       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-           (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-               device_remove_file(adev->dev,
-                               &dev_attr_pp_od_clk_voltage);
-       device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-       if (!(adev->flags & AMD_IS_APU) &&
-            (adev->asic_type != CHIP_VEGA10))
-               device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
-       if (!(adev->flags & AMD_IS_APU))
-               device_remove_file(adev->dev, &dev_attr_pcie_bw);
-       if (adev->unique_id)
-               device_remove_file(adev->dev, &dev_attr_unique_id);
-       if ((adev->asic_type >= CHIP_VEGA10) &&
-           !(adev->flags & AMD_IS_APU))
-               device_remove_file(adev->dev, &dev_attr_pp_features);
+
+       amdgpu_device_attr_remove_groups(adev,
+                                        amdgpu_device_attrs,
+                                        ARRAY_SIZE(amdgpu_device_attrs));
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
index 5db0ef86e84cfd33e7b430e1cb073a5570135e55..48e8086baf33f779f67cc301f2e143473e8dd443 100644 (file)
@@ -30,6 +30,52 @@ struct cg_flag_name
        const char *name;
 };
 
+enum amdgpu_device_attr_flags {
+       ATTR_FLAG_BASIC = (1 << 0),
+       ATTR_FLAG_ONEVF = (1 << 16),
+};
+
+#define ATTR_FLAG_TYPE_MASK    (0x0000ffff)
+#define ATTR_FLAG_MODE_MASK    (0xffff0000)
+#define ATTR_FLAG_MASK_ALL     (0xffffffff)
+
+enum amdgpu_device_attr_states {
+       ATTR_STATE_UNSUPPORTED = 0,
+       ATTR_STATE_SUPPORTED,
+};
+
+struct amdgpu_device_attr {
+       struct device_attribute dev_attr;
+       enum amdgpu_device_attr_flags flags;
+       enum amdgpu_device_attr_states states;
+       int (*attr_update)(struct amdgpu_device *adev,
+                          struct amdgpu_device_attr* attr,
+                          uint32_t mask);
+};
+
+#define to_amdgpu_device_attr(_dev_attr) \
+       container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
+
+#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
+       { .dev_attr = __ATTR(_name, _mode, _show, _store),              \
+         .flags = _flags,                                              \
+         .states = ATTR_STATE_SUPPORTED,                                       \
+         ##__VA_ARGS__, }
+
+#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...)                  \
+       __AMDGPU_DEVICE_ATTR(_name, _mode,                              \
+                            amdgpu_get_##_name, amdgpu_set_##_name,    \
+                            _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...)                      \
+       AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR,                    \
+                          _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...)                      \
+       __AMDGPU_DEVICE_ATTR(_name, S_IRUGO,                            \
+                            amdgpu_get_##_name, NULL,                  \
+                            _flags, ##__VA_ARGS__)
+
 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
 int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
index deaa26808841609569d173a8f56f20e4d24aca16..ddb4af0cc7020b4fb7ef62fea47238206828182a 100644 (file)
 
 #include "amdgpu_ras.h"
 
-static void psp_set_funcs(struct amdgpu_device *adev);
-
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
 
+static int psp_load_smu_fw(struct psp_context *psp);
+
 /*
  * Due to DF Cstate management centralized to PMFW, the firmware
  * loading sequence will be updated as below:
@@ -80,8 +80,6 @@ static int psp_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
 
-       psp_set_funcs(adev);
-
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
@@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
        int index;
        int timeout = 2000;
        bool ras_intr = false;
+       bool skip_unsupport = false;
 
        mutex_lock(&psp->mutex);
 
@@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
                amdgpu_asic_invalidate_hdp(psp->adev, NULL);
        }
 
+       /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
+       skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
+
        /* In some cases, psp response status is not 0 even there is no
         * problem while the command is submitted. Some version of PSP FW
         * doesn't write 0 to that field.
@@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
         * during psp initialization to avoid breaking hw_init and it doesn't
         * return -EINVAL.
         */
-       if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+       if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
                if (ucode)
                        DRM_WARN("failed to load ucode id (%d) ",
                                  ucode->ucode_id);
@@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
                                 struct psp_gfx_cmd_resp *cmd,
                                 uint64_t tmr_mc, uint32_t size)
 {
-       if (psp_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(psp->adev))
                cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
        else
                cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
@@ -662,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
        return ret;
 }
 
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+       /* Invoke xgmi ta to get hive id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return ret;
+
+       *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+       return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+       /* Invoke xgmi ta to get the node id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return ret;
+
+       *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+       return 0;
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+       int i;
+       int ret;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       /* Fill in the shared memory with topology information as input */
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to get the topology information */
+       ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+       if (ret)
+               return ret;
+
+       /* Read the output topology information from the shared memory */
+       topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+       topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+       for (i = 0; i < topology->num_nodes; i++) {
+               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+       }
+
+       return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       int i;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = 1;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to set topology information */
+       return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
 // ras begin
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
@@ -744,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
+       struct ta_ras_shared_memory *ras_cmd;
+       int ret;
+
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
        /*
         * TODO: bypass the loading in sriov for now
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+
+       if (amdgpu_ras_intr_triggered())
+               return ret;
+
+       if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
+       {
+               DRM_WARN("RAS: Unsupported Interface");
+               return -EINVAL;
+       }
+
+       if (!ret) {
+               if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+                       dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+                       ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+               }
+               else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+                       dev_warn(psp->adev->dev,
+                                "RAS internal register access blocked\n");
+       }
+
+       return ret;
 }
 
 int psp_ras_enable_features(struct psp_context *psp,
@@ -834,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
 
        return 0;
 }
+
+int psp_ras_trigger_error(struct psp_context *psp,
+                         struct ta_ras_trigger_error_input *info)
+{
+       struct ta_ras_shared_memory *ras_cmd;
+       int ret;
+
+       if (!psp->ras.ras_initialized)
+               return -EINVAL;
+
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+       ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+       ras_cmd->ras_in_message.trigger_error = *info;
+
+       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+       if (ret)
+               return -EINVAL;
+
+       /* If err_event_athub occurs, the error injection was successful; however,
+          the return status from the TA is no longer reliable */
+       if (amdgpu_ras_intr_triggered())
+               return 0;
+
+       return ras_cmd->ras_status;
+}
 // ras end
 
 // HDCP start
@@ -884,6 +1055,7 @@ static int psp_hdcp_load(struct psp_context *psp)
        if (!ret) {
                psp->hdcp_context.hdcp_initialized = true;
                psp->hdcp_context.session_id = cmd->resp.session_id;
+               mutex_init(&psp->hdcp_context.mutex);
        }
 
        kfree(cmd);
@@ -1029,6 +1201,7 @@ static int psp_dtm_load(struct psp_context *psp)
        if (!ret) {
                psp->dtm_context.dtm_initialized = true;
                psp->dtm_context.session_id = cmd->resp.session_id;
+               mutex_init(&psp->dtm_context.mutex);
        }
 
        kfree(cmd);
@@ -1169,16 +1342,20 @@ static int psp_hw_start(struct psp_context *psp)
        }
 
        /*
-        * For those ASICs with DF Cstate management centralized
+        * For ASICs with DF Cstate management centralized
         * to PMFW, TMR setup should be performed after PMFW
         * loaded and before other non-psp firmware loaded.
         */
-       if (!psp->pmfw_centralized_cstate_management) {
-               ret = psp_tmr_load(psp);
-               if (ret) {
-                       DRM_ERROR("PSP load tmr failed!\n");
+       if (psp->pmfw_centralized_cstate_management) {
+               ret = psp_load_smu_fw(psp);
+               if (ret)
                        return ret;
-               }
+       }
+
+       ret = psp_tmr_load(psp);
+       if (ret) {
+               DRM_ERROR("PSP load tmr failed!\n");
+               return ret;
        }
 
        return 0;
@@ -1355,7 +1532,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
 }
 
 static int psp_execute_np_fw_load(struct psp_context *psp,
-                              struct amdgpu_firmware_info *ucode)
+                                 struct amdgpu_firmware_info *ucode)
 {
        int ret = 0;
 
@@ -1369,64 +1546,95 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
        return ret;
 }
 
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+       int ret;
+       struct amdgpu_device* adev = psp->adev;
+       struct amdgpu_firmware_info *ucode =
+                       &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+
+       if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+               return 0;
+
+
+       if (adev->in_gpu_reset) {
+               ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+               if (ret) {
+                       DRM_WARN("Failed to set MP1 state prepare for reload\n");
+               }
+       }
+
+       ret = psp_execute_np_fw_load(psp, ucode);
+
+       if (ret)
+               DRM_ERROR("PSP load smu failed!\n");
+
+       return ret;
+}
+
+static bool fw_load_skip_check(struct psp_context *psp,
+                              struct amdgpu_firmware_info *ucode)
+{
+       if (!ucode->fw)
+               return true;
+
+       if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+           (psp_smu_reload_quirk(psp) ||
+            psp->autoload_supported ||
+            psp->pmfw_centralized_cstate_management))
+               return true;
+
+       if (amdgpu_sriov_vf(psp->adev) &&
+          (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
+               /* skip ucode loading in SRIOV VF */
+               return true;
+
+       if (psp->autoload_supported &&
+           (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+            ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+               /* skip mec JT when autoload is enabled */
+               return true;
+
+       return false;
+}
+
 static int psp_np_fw_load(struct psp_context *psp)
 {
        int i, ret;
        struct amdgpu_firmware_info *ucode;
        struct amdgpu_device* adev = psp->adev;
 
-       if (psp->autoload_supported ||
-           psp->pmfw_centralized_cstate_management) {
-               ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-               if (!ucode->fw || amdgpu_sriov_vf(adev))
-                       goto out;
-
-               ret = psp_execute_np_fw_load(psp, ucode);
+       if (psp->autoload_supported &&
+           !psp->pmfw_centralized_cstate_management) {
+               ret = psp_load_smu_fw(psp);
                if (ret)
                        return ret;
        }
 
-       if (psp->pmfw_centralized_cstate_management) {
-               ret = psp_tmr_load(psp);
-               if (ret) {
-                       DRM_ERROR("PSP load tmr failed!\n");
-                       return ret;
-               }
-       }
-
-out:
        for (i = 0; i < adev->firmware.max_ucodes; i++) {
                ucode = &adev->firmware.ucode[i];
-               if (!ucode->fw)
-                       continue;
 
                if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
-                   (psp_smu_reload_quirk(psp) ||
-                    psp->autoload_supported ||
-                    psp->pmfw_centralized_cstate_management))
-                       continue;
-
-               if (amdgpu_sriov_vf(adev) &&
-                  (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
-                    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
-                       /*skip ucode loading in SRIOV VF */
+                   !fw_load_skip_check(psp, ucode)) {
+                       ret = psp_load_smu_fw(psp);
+                       if (ret)
+                               return ret;
                        continue;
+               }
 
-               if (psp->autoload_supported &&
-                   (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
-                    ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
-                       /* skip mec JT when autoload is enabled */
+               if (fw_load_skip_check(psp, ucode))
                        continue;
 
                psp_print_fw_hdr(psp, ucode);
@@ -1438,17 +1646,12 @@ out:
                /* Start rlc autoload after psp recieved all the gfx firmware */
                if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
                    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
-                       ret = psp_rlc_autoload(psp);
+                       ret = psp_rlc_autoload_start(psp);
                        if (ret) {
                                DRM_ERROR("Failed to start rlc autoload\n");
                                return ret;
                        }
                }
-#if 0
-               /* check if firmware loaded sucessfully */
-               if (!amdgpu_psp_check_fw_loading_status(adev, i))
-                       return -EINVAL;
-#endif
        }
 
        return 0;
@@ -1806,19 +2009,110 @@ int psp_ring_cmd_submit(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
-                                       enum AMDGPU_UCODE_ID ucode_type)
+int psp_init_asd_microcode(struct psp_context *psp,
+                          const char *chip_name)
 {
-       struct amdgpu_firmware_info *ucode = NULL;
+       struct amdgpu_device *adev = psp->adev;
+       char fw_name[30];
+       const struct psp_firmware_header_v1_0 *asd_hdr;
+       int err = 0;
 
-       if (!adev->firmware.fw_size)
-               return false;
+       if (!chip_name) {
+               dev_err(adev->dev, "invalid chip name for asd microcode\n");
+               return -EINVAL;
+       }
 
-       ucode = &adev->firmware.ucode[ucode_type];
-       if (!ucode->fw || !ucode->ucode_size)
-               return false;
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->psp.asd_fw);
+       if (err)
+               goto out;
+
+       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+       return 0;
+out:
+       dev_err(adev->dev, "fail to initialize asd microcode\n");
+       release_firmware(adev->psp.asd_fw);
+       adev->psp.asd_fw = NULL;
+       return err;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp,
+                          const char *chip_name)
+{
+       struct amdgpu_device *adev = psp->adev;
+       char fw_name[30];
+       const struct psp_firmware_header_v1_0 *sos_hdr;
+       const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+       const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+       int err = 0;
+
+       if (!chip_name) {
+               dev_err(adev->dev, "invalid chip name for sos microcode\n");
+               return -EINVAL;
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
+       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       if (err)
+               goto out;
+
+       sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+       amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
+       switch (sos_hdr->header.header_version_major) {
+       case 1:
+               adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+               adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+               adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+               adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
+               adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+                               le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+               adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                               le32_to_cpu(sos_hdr->sos_offset_bytes);
+               if (sos_hdr->header.header_version_minor == 1) {
+                       sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+                       adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+                       adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                       le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                       le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+               }
+               if (sos_hdr->header.header_version_minor == 2) {
+                       sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                                   le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+               }
+               break;
+       default:
+               dev_err(adev->dev,
+                       "unsupported psp sos firmware\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       return 0;
+out:
+       dev_err(adev->dev,
+               "failed to init sos firmware\n");
+       release_firmware(adev->psp.sos_fw);
+       adev->psp.sos_fw = NULL;
 
-       return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+       return err;
 }
 
 static int psp_set_clockgating_state(void *handle,
@@ -1957,16 +2251,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
        device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
 }
 
-static const struct amdgpu_psp_funcs psp_funcs = {
-       .check_fw_loading_status = psp_check_fw_loading_status,
-};
-
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
-       if (NULL == adev->firmware.funcs)
-               adev->firmware.funcs = &psp_funcs;
-}
-
 const struct amdgpu_ip_block_version psp_v3_1_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_PSP,
index 297435c0c7c1ad630b16eebdc29ccb7aefb04f4d..2a56ad996d834ec0a1923dba90e28bb0ed83413e 100644 (file)
@@ -93,22 +93,8 @@ struct psp_funcs
                            enum psp_ring_type ring_type);
        int (*ring_destroy)(struct psp_context *psp,
                            enum psp_ring_type ring_type);
-       bool (*compare_sram_data)(struct psp_context *psp,
-                                 struct amdgpu_firmware_info *ucode,
-                                 enum AMDGPU_UCODE_ID ucode_type);
        bool (*smu_reload_quirk)(struct psp_context *psp);
        int (*mode1_reset)(struct psp_context *psp);
-       int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
-       int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
-       int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
-                                     struct psp_xgmi_topology_info *topology);
-       int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
-                                     struct psp_xgmi_topology_info *topology);
-       bool (*support_vmr_ring)(struct psp_context *psp);
-       int (*ras_trigger_error)(struct psp_context *psp,
-                       struct ta_ras_trigger_error_input *info);
-       int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
-       int (*rlc_autoload_start)(struct psp_context *psp);
        int (*mem_training_init)(struct psp_context *psp);
        void (*mem_training_fini)(struct psp_context *psp);
        int (*mem_training)(struct psp_context *psp, uint32_t ops);
@@ -161,6 +147,7 @@ struct psp_hdcp_context {
        struct amdgpu_bo        *hdcp_shared_bo;
        uint64_t                hdcp_shared_mc_addr;
        void                    *hdcp_shared_buf;
+       struct mutex            mutex;
 };
 
 struct psp_dtm_context {
@@ -169,6 +156,7 @@ struct psp_dtm_context {
        struct amdgpu_bo        *dtm_shared_bo;
        uint64_t                dtm_shared_mc_addr;
        void                    *dtm_shared_buf;
+       struct mutex            mutex;
 };
 
 #define MEM_TRAIN_SYSTEM_SIGNATURE             0x54534942
@@ -306,8 +294,6 @@ struct amdgpu_psp_funcs {
 #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
 #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
 #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_compare_sram_data(psp, ucode, type) \
-               (psp)->funcs->compare_sram_data((psp), (ucode), (type))
 #define psp_init_microcode(psp) \
                ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
 #define psp_bootloader_load_kdb(psp) \
@@ -318,22 +304,8 @@ struct amdgpu_psp_funcs {
                ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
 #define psp_smu_reload_quirk(psp) \
                ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
-#define psp_support_vmr_ring(psp) \
-               ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
 #define psp_mode1_reset(psp) \
                ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp, node_id) \
-               ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
-#define psp_xgmi_get_hive_id(psp, hive_id) \
-               ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
-#define psp_xgmi_get_topology_info(psp, num_device, topology) \
-               ((psp)->funcs->xgmi_get_topology_info ? \
-               (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_xgmi_set_topology_info(psp, num_device, topology) \
-               ((psp)->funcs->xgmi_set_topology_info ?  \
-               (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_rlc_autoload(psp) \
-               ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
 #define psp_mem_training_init(psp) \
        ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
 #define psp_mem_training_fini(psp) \
@@ -341,15 +313,6 @@ struct amdgpu_psp_funcs {
 #define psp_mem_training(psp, ops) \
        ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
 
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
-
-#define psp_ras_trigger_error(psp, info) \
-       ((psp)->funcs->ras_trigger_error ? \
-       (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
-#define psp_ras_cure_posion(psp, addr) \
-       ((psp)->funcs->ras_cure_posion ? \
-       (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
-
 #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
 #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
 
@@ -377,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 int psp_xgmi_initialize(struct psp_context *psp);
 int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology);
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_ras_enable_features(struct psp_context *psp,
                union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+                         struct ta_ras_trigger_error_input *info);
+
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 
@@ -393,4 +367,8 @@ int psp_ring_cmd_submit(struct psp_context *psp,
                        uint64_t cmd_buf_mc_addr,
                        uint64_t fence_mc_addr,
                        int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+                          const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+                          const char *chip_name);
 #endif
index ab379b44679cc50f862a1e680c482f03f07278ab..50fe08bf2f727656d8680fa998795d490d6df218 100644 (file)
@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
 
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+       if (adev && amdgpu_ras_get_context(adev))
+               amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+       if (adev && amdgpu_ras_get_context(adev))
+               return amdgpu_ras_get_context(adev)->error_query_ready;
+
+       return false;
+}
+
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
 {
@@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
        struct ras_debug_if data;
        int ret = 0;
 
-       if (amdgpu_ras_intr_triggered()) {
-               DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+       if (!amdgpu_ras_get_error_query_ready(adev)) {
+               dev_warn(adev->dev, "RAS WARN: error injection "
+                               "currently inaccessible\n");
                return size;
        }
 
@@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
-                       DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+                       dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+                                       "as bad before error injection!\n",
                                        data.inject.address);
                        break;
                }
@@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                .head = obj->head,
        };
 
-       if (amdgpu_ras_intr_triggered())
+       if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return snprintf(buf, PAGE_SIZE,
                                "Query currently inaccessible\n");
 
@@ -486,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 }
 /* obj end */
 
+void amdgpu_ras_parse_status_code(struct amdgpu_device* adev,
+                                 const char*           invoke_type,
+                                 const char*           block_name,
+                                 enum ta_ras_status    ret)
+{
+       switch (ret) {
+       case TA_RAS_STATUS__SUCCESS:
+               return;
+       case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+               dev_warn(adev->dev,
+                       "RAS WARN: %s %s currently unavailable\n",
+                       invoke_type,
+                       block_name);
+               break;
+       default:
+               dev_err(adev->dev,
+                       "RAS ERROR: %s %s error failed ret 0x%X\n",
+                       invoke_type,
+                       block_name,
+                       ret);
+       }
+}
+
 /* feature ctl begin */
 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                struct ras_common_if *head)
@@ -549,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       union ta_ras_cmd_input info;
+       union ta_ras_cmd_input *info;
        int ret;
 
        if (!con)
                return -EINVAL;
 
+        info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
        if (!enable) {
-               info.disable_features = (struct ta_ras_disable_features_input) {
+               info->disable_features = (struct ta_ras_disable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        } else {
-               info.enable_features = (struct ta_ras_enable_features_input) {
+               info->enable_features = (struct ta_ras_enable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
@@ -570,26 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
        /* Do not enable if it is not allowed. */
        WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
        /* Are we alerady in that state we are going to set? */
-       if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
-               return 0;
+       if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+               ret = 0;
+               goto out;
+       }
 
        if (!amdgpu_ras_intr_triggered()) {
-               ret = psp_ras_enable_features(&adev->psp, &info, enable);
+               ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
-                       DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
-                                       enable ? "enable":"disable",
-                                       ras_block_str(head->block),
-                                       ret);
+                       amdgpu_ras_parse_status_code(adev,
+                                                    enable ? "enable":"disable",
+                                                    ras_block_str(head->block),
+                                                   (enum ta_ras_status)ret);
                        if (ret == TA_RAS_STATUS__RESET_NEEDED)
-                               return -EAGAIN;
-                       return -EINVAL;
+                               ret = -EAGAIN;
+                       else
+                               ret = -EINVAL;
+
+                       goto out;
                }
        }
 
        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);
-
-       return 0;
+       ret = 0;
+out:
+       kfree(info);
+       return ret;
 }
 
 /* Only used in device probe stage and called only once. */
@@ -618,7 +668,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
-                                       DRM_INFO("RAS INFO: %s setup object\n",
+                                       dev_info(adev->dev,
+                                               "RAS INFO: %s setup object\n",
                                                ras_block_str(head->block));
                        }
                } else {
@@ -744,17 +795,48 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
        info->ce_count = obj->err_data.ce_count;
 
        if (err_data.ce_count) {
-               dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
-                        obj->err_data.ce_count, ras_block_str(info->head.block));
+               dev_info(adev->dev, "%ld correctable hardware errors "
+                                       "detected in %s block, no user "
+                                       "action is needed.\n",
+                                       obj->err_data.ce_count,
+                                       ras_block_str(info->head.block));
        }
        if (err_data.ue_count) {
-               dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
-                        obj->err_data.ue_count, ras_block_str(info->head.block));
+               dev_info(adev->dev, "%ld uncorrectable hardware errors "
+                                       "detected in %s block\n",
+                                       obj->err_data.ue_count,
+                                       ras_block_str(info->head.block));
        }
 
        return 0;
 }
 
+/* Trigger XGMI/WAFL error */
+int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+                                struct ta_ras_trigger_error_input *block_info)
+{
+       int ret;
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to disallow df cstate");
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+               dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+       ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+       if (amdgpu_ras_intr_triggered())
+               return ret;
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+               dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to allow df cstate");
+
+       return ret;
+}
+
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
@@ -788,20 +870,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                break;
        case AMDGPU_RAS_BLOCK__UMC:
        case AMDGPU_RAS_BLOCK__MMHUB:
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                ret = psp_ras_trigger_error(&adev->psp, &block_info);
                break;
+       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+               ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+               break;
        default:
-               DRM_INFO("%s error injection is not supported yet\n",
+               dev_info(adev->dev, "%s error injection is not supported yet\n",
                         ras_block_str(info->head.block));
                ret = -EINVAL;
        }
 
-       if (ret)
-               DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
-                               ras_block_str(info->head.block),
-                               ret);
+       amdgpu_ras_parse_status_code(adev,
+                                    "inject",
+                                    ras_block_str(info->head.block),
+                                    (enum ta_ras_status)ret);
 
        return ret;
 }
@@ -1430,9 +1514,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
 
        /* Build list of devices to query RAS related errors */
-       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1)
                device_list_handle = &hive->device_list;
-       } else {
+       else {
+               INIT_LIST_HEAD(&device_list);
                list_add_tail(&adev->gmc.xgmi.head, &device_list);
                device_list_handle = &device_list;
        }
@@ -1535,7 +1620,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
                                                        &data->bps[control->num_recs],
                                                        true,
                                                        save_count)) {
-                       DRM_ERROR("Failed to save EEPROM table data!");
+                       dev_err(adev->dev, "Failed to save EEPROM table data!");
                        return -EIO;
                }
 
@@ -1563,7 +1648,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 
        if (amdgpu_ras_eeprom_process_recods(control, bps, false,
                control->num_recs)) {
-               DRM_ERROR("Failed to load EEPROM table records!");
+               dev_err(adev->dev, "Failed to load EEPROM table records!");
                ret = -EIO;
                goto out;
        }
@@ -1637,7 +1722,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
                                               AMDGPU_GPU_PAGE_SIZE,
                                               AMDGPU_GEM_DOMAIN_VRAM,
                                               &bo, NULL))
-                       DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+                       dev_warn(adev->dev, "RAS WARN: reserve vram for "
+                                       "retired page %llx fail\n", bp);
 
                data->bps_bo[i] = bo;
                data->last_reserved = i + 1;
@@ -1725,7 +1811,7 @@ free:
        kfree(*data);
        con->eh_data = NULL;
 out:
-       DRM_WARN("Failed to initialize ras recovery!\n");
+       dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
 
        return ret;
 }
@@ -1787,18 +1873,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
                return;
 
        if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
-               DRM_INFO("HBM ECC is active.\n");
+               dev_info(adev->dev, "HBM ECC is active.\n");
                *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
                                1 << AMDGPU_RAS_BLOCK__DF);
        } else
-               DRM_INFO("HBM ECC is not presented.\n");
+               dev_info(adev->dev, "HBM ECC is not presented.\n");
 
        if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
-               DRM_INFO("SRAM ECC is active.\n");
+               dev_info(adev->dev, "SRAM ECC is active.\n");
                *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                                1 << AMDGPU_RAS_BLOCK__DF);
        } else
-               DRM_INFO("SRAM ECC is not presented.\n");
+               dev_info(adev->dev, "SRAM ECC is not presented.\n");
 
        /* hw_supported needs to be aligned with RAS block mask. */
        *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1855,7 +1941,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        if (amdgpu_ras_fs_init(adev))
                goto fs_out;
 
-       DRM_INFO("RAS INFO: ras initialized successfully, "
+       dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                        "hardware ability[%x] ras_mask[%x]\n",
                        con->hw_supported, con->supported);
        return 0;
@@ -2037,7 +2123,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
                return;
 
        if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
-               DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+               dev_info(adev->dev, "uncorrectable hardware error"
+                       "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
 
                amdgpu_ras_reset_gpu(adev);
        }
index 55c3eceb390d4f4656c62d8c0af064564e4c1587..e7df5d8429f825946245c9fc9fc81cb338fc8bbc 100644 (file)
@@ -334,6 +334,8 @@ struct amdgpu_ras {
        uint32_t flags;
        bool reboot;
        struct amdgpu_ras_eeprom_control eeprom_control;
+
+       bool error_query_ready;
 };
 
 struct ras_fs_data {
@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
 
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
 
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
 #endif
index a7e1d0425ed08b74d640a8fd8a9bd4820d7ec30e..13ea8ebc421c6e47bad20b525ab525883c3d852f 100644 (file)
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned max_dw, struct amdgpu_irq_src *irq_src,
-                    unsigned irq_type)
+                    unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int hw_prio)
 {
        int r, i;
        int sched_hw_submission = amdgpu_sched_hw_submission;
+       u32 *num_sched;
+       u32 hw_ip;
 
        /* Set the hw submission limit higher for KIQ because
         * it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        ring->priority = DRM_SCHED_PRIORITY_NORMAL;
        mutex_init(&ring->priority_mutex);
 
+       if (!ring->no_scheduler) {
+               hw_ip = ring->funcs->type;
+               num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+               adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+                       &ring->sched;
+       }
+
        for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
                atomic_set(&ring->num_jobs[i], 0);
 
index 9a443013d70d5d4a94099206eb84ecb1662f7b6a..be218754629ab85b02000c9ea6d379ab44ad8148 100644 (file)
 
 /* max number of rings */
 #define AMDGPU_MAX_RINGS               28
+#define AMDGPU_MAX_HWIP_RINGS          8
 #define AMDGPU_MAX_GFX_RINGS           2
 #define AMDGPU_MAX_COMPUTE_RINGS       8
 #define AMDGPU_MAX_VCE_RINGS           3
 #define AMDGPU_MAX_UVD_ENC_RINGS       2
 
+#define AMDGPU_RING_PRIO_DEFAULT       1
+#define AMDGPU_RING_PRIO_MAX           AMDGPU_GFX_PIPE_PRIO_MAX
+
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED   ((void *)0ul)
 #define AMDGPU_FENCE_OWNER_VM          ((void *)1ul)
 
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
+#define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
+
 enum amdgpu_ring_type {
-       AMDGPU_RING_TYPE_GFX,
-       AMDGPU_RING_TYPE_COMPUTE,
-       AMDGPU_RING_TYPE_SDMA,
-       AMDGPU_RING_TYPE_UVD,
-       AMDGPU_RING_TYPE_VCE,
-       AMDGPU_RING_TYPE_KIQ,
-       AMDGPU_RING_TYPE_UVD_ENC,
-       AMDGPU_RING_TYPE_VCN_DEC,
-       AMDGPU_RING_TYPE_VCN_ENC,
-       AMDGPU_RING_TYPE_VCN_JPEG
+       AMDGPU_RING_TYPE_GFX            = AMDGPU_HW_IP_GFX,
+       AMDGPU_RING_TYPE_COMPUTE        = AMDGPU_HW_IP_COMPUTE,
+       AMDGPU_RING_TYPE_SDMA           = AMDGPU_HW_IP_DMA,
+       AMDGPU_RING_TYPE_UVD            = AMDGPU_HW_IP_UVD,
+       AMDGPU_RING_TYPE_VCE            = AMDGPU_HW_IP_VCE,
+       AMDGPU_RING_TYPE_UVD_ENC        = AMDGPU_HW_IP_UVD_ENC,
+       AMDGPU_RING_TYPE_VCN_DEC        = AMDGPU_HW_IP_VCN_DEC,
+       AMDGPU_RING_TYPE_VCN_ENC        = AMDGPU_HW_IP_VCN_ENC,
+       AMDGPU_RING_TYPE_VCN_JPEG       = AMDGPU_HW_IP_VCN_JPEG,
+       AMDGPU_RING_TYPE_KIQ
+};
+
+enum amdgpu_ib_pool_type {
+       /* Normal submissions to the top of the pipeline. */
+       AMDGPU_IB_POOL_DELAYED,
+       /* Immediate submissions to the bottom of the pipeline. */
+       AMDGPU_IB_POOL_IMMEDIATE,
+       /* Direct submission to the ring buffer during init and reset. */
+       AMDGPU_IB_POOL_DIRECT,
+
+       AMDGPU_IB_POOL_MAX
 };
 
 struct amdgpu_device;
@@ -65,6 +82,11 @@ struct amdgpu_ib;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 
+struct amdgpu_sched {
+       u32                             num_scheds;
+       struct drm_gpu_scheduler        *sched[AMDGPU_MAX_HWIP_RINGS];
+};
+
 /*
  * Fences.
  */
@@ -96,7 +118,8 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
                      unsigned flags);
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+                             uint32_t timeout);
 bool amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
@@ -159,17 +182,20 @@ struct amdgpu_ring_funcs {
        void (*end_use)(struct amdgpu_ring *ring);
        void (*emit_switch_buffer) (struct amdgpu_ring *ring);
        void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-       void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+       void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+                         uint32_t reg_val_offs);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
                              uint32_t val, uint32_t mask);
        void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask);
-       void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+                               bool secure);
        /* Try to soft recover the ring to make the fence signal */
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
        int (*preempt_ib)(struct amdgpu_ring *ring);
+       void (*emit_mem_sync)(struct amdgpu_ring *ring);
 };
 
 struct amdgpu_ring {
@@ -214,12 +240,12 @@ struct amdgpu_ring {
        unsigned                vm_inv_eng;
        struct dma_fence        *vmid_wait;
        bool                    has_compute_vm_bug;
+       bool                    no_scheduler;
 
        atomic_t                num_jobs[DRM_SCHED_PRIORITY_MAX];
        struct mutex            priority_mutex;
        /* protected by priority_mutex */
        int                     priority;
-       bool                    has_high_prio;
 
 #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
@@ -241,11 +267,11 @@ struct amdgpu_ring {
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -257,8 +283,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned irq_type);
+                    unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int prio);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t val0,
index 4b352206354b884f5d6cae71d5a26af1008b5edc..e5b8fb8e75c5250427761a0466e66f65ea138aa6 100644 (file)
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
 
 struct amdgpu_sdma {
        struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-       struct drm_gpu_scheduler    *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
-       uint32_t                    num_sdma_sched;
        struct amdgpu_irq_src   trap_irq;
        struct amdgpu_irq_src   illegal_inst_irq;
        struct amdgpu_irq_src   ecc_irq;
@@ -91,7 +89,8 @@ struct amdgpu_buffer_funcs {
                                 /* dst addr in bytes */
                                 uint64_t dst_offset,
                                 /* number of byte to transfer */
-                                uint32_t byte_count);
+                                uint32_t byte_count,
+                                bool tmz);
 
        /* maximum bytes in a single operation */
        uint32_t        fill_max_bytes;
@@ -109,7 +108,7 @@ struct amdgpu_buffer_funcs {
                                 uint32_t byte_count);
 };
 
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b), (t))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 
 struct amdgpu_sdma_instance *
index b8639225369611a2fbdfe899bc94ef9f88af8a4a..b87ca171986af0879394d0f09634f641b006d26f 100644 (file)
@@ -249,6 +249,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;
 
+               /* Never sync to VM updates either. */
+               if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+                   owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+                       continue;
+
                /* Ignore fences depending on the sync mode */
                switch (mode) {
                case AMDGPU_SYNC_ALWAYS:
index b158230af8db705cfee5d012fb60738bc0cb69e5..2f4d5ca9894fc361ee72bd5b168fc89102ceac42 100644 (file)
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
@@ -124,7 +124,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(gtt_obj[i]);
 
                r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
-                                      size, NULL, &fence, false, false);
+                                      size, NULL, &fence, false, false, false);
 
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -170,7 +170,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(vram_obj);
 
                r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
-                                      size, NULL, &fence, false, false);
+                                      size, NULL, &fence, false, false, false);
 
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
index 63e734a125fb60fb4efe2dc7f5bcc769825fc022..5da20fc166d98e2cb92c644d11d2d55d08b8563f 100644 (file)
@@ -35,7 +35,7 @@
 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
         job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
 
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
                      (unsigned long)__entry->value)
 );
 
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
index 6309ff72bd78765f45baa9772058becc52dbf12e..eff1f73302de9da22ef1e3b8456b304c81c16bed 100644 (file)
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ    (size_t)128
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr);
 
 /**
  * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -277,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
  *
  */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
-                                              unsigned long *offset)
+                                              uint64_t *offset)
 {
        struct drm_mm_node *mm_node = mem->mm_node;
 
@@ -288,92 +283,192 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
        return mm_node;
 }
 
+/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should setup a TMZ enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Setup one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+                                struct ttm_mem_reg *mem,
+                                struct drm_mm_node *mm_node,
+                                unsigned num_pages, uint64_t offset,
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *addr)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       unsigned num_dw, num_bytes;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       void *cpu_addr;
+       uint64_t flags;
+       unsigned int i;
+       int r;
+
+       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+       /* Map only what can't be accessed directly */
+       if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+               *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+               return 0;
+       }
+
+       *addr = adev->gmc.gart_start;
+       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+               AMDGPU_GPU_PAGE_SIZE;
+       *addr += offset & ~PAGE_MASK;
+
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       num_bytes = num_pages * 8;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                    AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               return r;
+
+       src_addr = num_dw * 4;
+       src_addr += job->ibs[0].gpu_addr;
+
+       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+                               dst_addr, num_bytes, false);
+
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+
+       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+       if (tmz)
+               flags |= AMDGPU_PTE_TMZ;
+
+       cpu_addr = &job->ibs[0].ptr[num_dw];
+
+       if (mem->mem_type == TTM_PL_TT) {
+               struct ttm_dma_tt *dma;
+               dma_addr_t *dma_address;
+
+               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+                                   cpu_addr);
+               if (r)
+                       goto error_free;
+       } else {
+               dma_addr_t dma_address;
+
+               dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+               dma_address += adev->vm_manager.vram_base_offset;
+
+               for (i = 0; i < num_pages; ++i) {
+                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+                                           &dma_address, flags, cpu_addr);
+                       if (r)
+                               goto error_free;
+
+                       dma_address += PAGE_SIZE;
+               }
+       }
+
+       r = amdgpu_job_submit(job, &adev->mman.entity,
+                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r)
+               goto error_free;
+
+       dma_fence_put(fence);
+
+       return r;
+
+error_free:
+       amdgpu_job_free(job);
+       return r;
+}
+
 /**
  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
  *
  * The function copies @size bytes from {src->mem + src->offset} to
  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
  * move and different for a BO to BO copy.
  *
- * @f: Returns the last fence if multiple jobs are submitted.
  */
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f)
 {
+       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+                                       AMDGPU_GPU_PAGE_SIZE);
+
+       uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct drm_mm_node *src_mm, *dst_mm;
-       uint64_t src_node_start, dst_node_start, src_node_size,
-                dst_node_size, src_page_offset, dst_page_offset;
        struct dma_fence *fence = NULL;
        int r = 0;
-       const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
 
        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
-       src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
-       src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
-                                            src->offset;
-       src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
-       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+       src_offset = src->offset;
+       src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+       src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
 
-       dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
-       dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
-                                            dst->offset;
-       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
-       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+       dst_offset = dst->offset;
+       dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
        while (size) {
-               unsigned long cur_size;
-               uint64_t from = src_node_start, to = dst_node_start;
+               uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+               uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
                struct dma_fence *next;
+               uint32_t cur_size;
+               uint64_t from, to;
 
                /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
                 * begins at an offset, then adjust the size accordingly
                 */
-               cur_size = min3(min(src_node_size, dst_node_size), size,
-                               GTT_MAX_BYTES);
-               if (cur_size + src_page_offset > GTT_MAX_BYTES ||
-                   cur_size + dst_page_offset > GTT_MAX_BYTES)
-                       cur_size -= max(src_page_offset, dst_page_offset);
-
-               /* Map only what needs to be accessed. Map src to window 0 and
-                * dst to window 1
-                */
-               if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(src->bo, src->mem,
-                                       PFN_UP(cur_size + src_page_offset),
-                                       src_node_start, 0, ring,
-                                       &from);
-                       if (r)
-                               goto error;
-                       /* Adjust the offset because amdgpu_map_buffer returns
-                        * start of mapped page
-                        */
-                       from += src_page_offset;
-               }
+               cur_size = max(src_page_offset, dst_page_offset);
+               cur_size = min(min3(src_node_size, dst_node_size, size),
+                              (uint64_t)(GTT_MAX_BYTES - cur_size));
+
+               /* Map src to window 0 and dst to window 1. */
+               r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+                                         PFN_UP(cur_size + src_page_offset),
+                                         src_offset, 0, ring, tmz, &from);
+               if (r)
+                       goto error;
 
-               if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(dst->bo, dst->mem,
-                                       PFN_UP(cur_size + dst_page_offset),
-                                       dst_node_start, 1, ring,
-                                       &to);
-                       if (r)
-                               goto error;
-                       to += dst_page_offset;
-               }
+               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+                                         PFN_UP(cur_size + dst_page_offset),
+                                         dst_offset, 1, ring, tmz, &to);
+               if (r)
+                       goto error;
 
                r = amdgpu_copy_buffer(ring, from, to, cur_size,
-                                      resv, &next, false, true);
+                                      resv, &next, false, true, tmz);
                if (r)
                        goto error;
 
@@ -386,23 +481,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 
                src_node_size -= cur_size;
                if (!src_node_size) {
-                       src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
-                                                            src->mem);
-                       src_node_size = (src_mm->size << PAGE_SHIFT);
-                       src_page_offset = 0;
+                       ++src_mm;
+                       src_node_size = src_mm->size << PAGE_SHIFT;
+                       src_offset = 0;
                } else {
-                       src_node_start += cur_size;
-                       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+                       src_offset += cur_size;
                }
+
                dst_node_size -= cur_size;
                if (!dst_node_size) {
-                       dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
-                                                            dst->mem);
-                       dst_node_size = (dst_mm->size << PAGE_SHIFT);
-                       dst_page_offset = 0;
+                       ++dst_mm;
+                       dst_node_size = dst_mm->size << PAGE_SHIFT;
+                       dst_offset = 0;
                } else {
-                       dst_node_start += cur_size;
-                       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+                       dst_offset += cur_size;
                }
        }
 error:
@@ -425,6 +517,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *old_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
@@ -438,14 +531,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
                                       new_mem->num_pages << PAGE_SHIFT,
+                                      amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
        if (r)
                goto error;
 
        /* clear the space being freed */
        if (old_mem->mem_type == TTM_PL_VRAM &&
-           (ttm_to_amdgpu_bo(bo)->flags &
-            AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+           (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
                r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -742,8 +835,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
 {
+       uint64_t offset = (page_offset << PAGE_SHIFT);
        struct drm_mm_node *mm;
-       unsigned long offset = (page_offset << PAGE_SHIFT);
 
        mm = amdgpu_find_mm_node(&bo->mem, &offset);
        return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -1027,6 +1120,9 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
 
+       if (amdgpu_bo_encrypted(abo))
+               flags |= AMDGPU_PTE_TMZ;
+
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
@@ -1539,6 +1635,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
        switch (bo->mem.mem_type) {
        case TTM_PL_TT:
+               if (amdgpu_bo_is_amdgpu_bo(bo) &&
+                   amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+                       return false;
                return true;
 
        case TTM_PL_VRAM:
@@ -1587,8 +1686,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
 
-       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
-       pos = (nodes->start << PAGE_SHIFT) + offset;
+       pos = offset;
+       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+       pos += (nodes->start << PAGE_SHIFT);
 
        while (len && pos < adev->gmc.mc_vram_size) {
                uint64_t aligned_pos = pos & ~(uint64_t)3;
@@ -1857,17 +1957,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                return r;
 
        /*
-        * reserve one TMR (64K) memory at the top of VRAM which holds
+        * reserve TMR memory at the top of VRAM which holds
         * IP Discovery data and is protected by PSP.
         */
-       r = amdgpu_bo_create_kernel_at(adev,
-                                      adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
-                                      DISCOVERY_TMR_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->discovery_memory,
-                                      NULL);
-       if (r)
-               return r;
+       if (adev->discovery_tmr_size > 0) {
+               r = amdgpu_bo_create_kernel_at(adev,
+                       adev->gmc.real_vram_size - adev->discovery_tmr_size,
+                       adev->discovery_tmr_size,
+                       AMDGPU_GEM_DOMAIN_VRAM,
+                       &adev->discovery_memory,
+                       NULL);
+               if (r)
+                       return r;
+       }
 
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
                 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -2015,75 +2117,14 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr)
-{
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-       struct amdgpu_device *adev = ring->adev;
-       struct ttm_tt *ttm = bo->ttm;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       dma_addr_t *dma_address;
-       struct dma_fence *fence;
-       uint64_t src_addr, dst_addr;
-       uint64_t flags;
-       int r;
-
-       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
-              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
-       *addr = adev->gmc.gart_start;
-       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
-               AMDGPU_GPU_PAGE_SIZE;
-
-       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       num_bytes = num_pages * 8;
-
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
-       if (r)
-               return r;
-
-       src_addr = num_dw * 4;
-       src_addr += job->ibs[0].gpu_addr;
-
-       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
-       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
-       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-                               dst_addr, num_bytes);
-
-       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-       WARN_ON(job->ibs[0].length_dw > num_dw);
-
-       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
-       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-                           &job->ibs[0].ptr[num_dw]);
-       if (r)
-               goto error_free;
-
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
-
-       dma_fence_put(fence);
-
-       return r;
-
-error_free:
-       amdgpu_job_free(job);
-       return r;
-}
-
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush)
+                      bool vm_needs_flush, bool tmz)
 {
+       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
 
@@ -2101,7 +2142,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
        if (r)
                return r;
 
@@ -2123,7 +2164,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
-                                       dst_offset, cur_size_in_bytes);
+                                       dst_offset, cur_size_in_bytes, tmz);
 
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
@@ -2190,7 +2231,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
        /* for IB padding */
        num_dw += 64;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                return r;
 
index bd05bbb4878d96a69ebc4b28d93c6d4040fe5d53..4351d02644a7bef01177d8e0d57bf9cb72cb56a1 100644 (file)
@@ -24,8 +24,9 @@
 #ifndef __AMDGPU_TTM_H__
 #define __AMDGPU_TTM_H__
 
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
 #include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
 
 #define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
 #define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
@@ -87,11 +97,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush);
+                      bool vm_needs_flush, bool tmz);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
index 9ef3124282318f37dffcf20c2ebf98ebf6561372..65bb25e31d450ba7c194eecf3fdd9a8832dc7d23 100644 (file)
@@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
 FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
 FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
index 9dd51f0d2c11be70b0c1b9cfd360fefde6a54300..af1b1ccf613c98589a266e609951ada186af7ddb 100644 (file)
@@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
                 * even NOMEM error is encountered
                 */
                if(!err_data->err_addr)
-                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
+                       dev_warn(adev->dev, "Failed to alloc memory for "
+                                       "umc error address record!\n");
 
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
@@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 
        /* only uncorrectable error needs gpu reset */
        if (err_data->ue_count) {
+               dev_info(adev->dev, "%ld uncorrectable hardware errors "
+                               "detected in UMC block\n",
+                               err_data->ue_count);
+
                if (err_data->err_addr_cnt &&
                    amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
                                                err_data->err_addr_cnt))
-                       DRM_WARN("Failed to add ras bad page!\n");
+                       dev_warn(adev->dev, "Failed to add ras bad page!\n");
 
                amdgpu_ras_reset_gpu(adev);
        }
index 5fd32ad1c5751e9b9ca0f11fda0c937ad0d798e3..5100ebe8858d442af14f4bf1ef46cdd30eb8c5cd 100644 (file)
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                        goto err;
        }
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                goto err;
 
index 59ddba137946bd5e5a345eab215fb5c823628d3c..ecaa2d7483b20d19883b16f01e892941dc78e2b2 100644 (file)
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                    AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -524,7 +525,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        struct dma_fence *f = NULL;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                    direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;
 
index a41272fbcba23ab7f87915b71b9d7ddbc4b32c03..2de99b4416018c3bbc8d33a57e9c0e8dc47ce89d 100644 (file)
@@ -56,13 +56,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-       unsigned long bo_size;
+       unsigned long bo_size, fw_shared_bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int i, r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+       mutex_init(&adev->vcn.vcn_pg_lock);
+       atomic_set(&adev->vcn.total_submission_cnt, 0);
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+               atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 
        switch (adev->asic_type) {
        case CHIP_RAVEN:
@@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                                return r;
                        }
                }
+
+               r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+                               &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+               if (r) {
+                       dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+                       return r;
+               }
+
+               fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
        }
 
        return 0;
@@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
+
+               kvfree(adev->vcn.inst[j].saved_shm_bo);
+               amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+                                         &adev->vcn.inst[j].fw_shared_gpu_addr,
+                                         (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        }
 
        release_firmware(adev->vcn.fw);
+       mutex_destroy(&adev->vcn.vcn_pg_lock);
 
        return 0;
 }
@@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
                        return -ENOMEM;
 
                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+               if (adev->vcn.inst[i].fw_shared_bo == NULL)
+                       return 0;
+
+               if (!adev->vcn.inst[i].saved_shm_bo)
+                       return -ENOMEM;
+
+               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+               memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
        }
        return 0;
 }
@@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
                        }
                        memset_io(ptr, 0, size);
                }
+
+               if (adev->vcn.inst[i].fw_shared_bo == NULL)
+                       return -EINVAL;
+
+               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+               if (adev->vcn.inst[i].saved_shm_bo != NULL)
+                       memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+               else
+                       memset_io(ptr, 0, size);
        }
        return 0;
 }
@@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                        struct dpg_pause_state new_state;
 
-                       if (fence[j])
+                       if (fence[j] ||
+                               unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                fences += fence[j];
        }
 
-       if (fences == 0) {
-               amdgpu_gfx_off_ctrl(adev, true);
+       if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                       AMD_PG_STATE_GATE);
        } else {
@@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-       if (set_clocks) {
-               amdgpu_gfx_off_ctrl(adev, false);
-               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-                      AMD_PG_STATE_UNGATE);
-       }
+       atomic_inc(&adev->vcn.total_submission_cnt);
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+       mutex_lock(&adev->vcn.vcn_pg_lock);
+       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+              AMD_PG_STATE_UNGATE);
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                struct dpg_pause_state new_state;
-               unsigned int fences = 0;
-               unsigned int i;
 
-               for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
-               }
-               if (fences)
+               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+                       atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
-               else
-                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               } else {
+                       unsigned int fences = 0;
+                       unsigned int i;
 
-               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
-                       new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+                               fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+                       if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+                               new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       else
+                               new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               }
 
                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
+       mutex_unlock(&adev->vcn.vcn_pg_lock);
 }
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+       if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+               ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+               atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+       atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 }
 
@@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;
 
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
index 6fe057329de2b1237887dbbd3946cba653833806..90aa12b2272519f8419f3930b8c4691239fece9f 100644 (file)
                }                                                                               \
        } while (0)
 
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
+
+enum fw_queue_mode {
+       FW_QUEUE_RING_RESET = 1,
+       FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
 enum engine_status_constants {
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
+       struct amdgpu_bo        *fw_shared_bo;
        struct dpg_pause_state  pause_state;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
        uint32_t                *dpg_sram_curr_addr;
+       atomic_t                dpg_enc_submission_cnt;
+       void                    *fw_shared_cpu_addr;
+       uint64_t                fw_shared_gpu_addr;
+       void                    *saved_shm_bo;
 };
 
 struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
        uint8_t num_vcn_inst;
        struct amdgpu_vcn_inst   inst[AMDGPU_MAX_VCN_INSTANCES];
        struct amdgpu_vcn_reg    internal;
-       struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
-       struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
-       uint32_t                 num_vcn_enc_sched;
-       uint32_t                 num_vcn_dec_sched;
+       struct mutex             vcn_pg_lock;
+       atomic_t                 total_submission_cnt;
 
        unsigned        harvest_config;
        int (*pause_dpg_mode)(struct amdgpu_device *adev,
                int inst_idx, struct dpg_pause_state *new_state);
 };
 
+struct amdgpu_fw_shared_multi_queue {
+       uint8_t decode_queue_mode;
+       uint8_t encode_generalpurpose_queue_mode;
+       uint8_t encode_lowlatency_queue_mode;
+       uint8_t encode_realtime_queue_mode;
+       uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+       uint32_t present_flag_0;
+       uint8_t pad[53];
+       struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vcn_suspend(struct amdgpu_device *adev);
index adc813cde8e281617118725a544483bc57a64d21..f3b38c9e04ca09d86db6c6a514c4508a18c73308 100644 (file)
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
        /* enable virtual display */
-       adev->mode_info.num_crtc = 1;
+       if (adev->mode_info.num_crtc == 0)
+               adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
@@ -59,7 +60,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
-       amdgpu_fence_emit_polling(ring, &seq);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -81,6 +85,9 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 
        return;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq:
        pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
 }
@@ -152,6 +159,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
        return 0;
 }
 
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+
+       if (virt->ops && virt->ops->req_init_data)
+               virt->ops->req_init_data(adev);
+
+       if (adev->virt.req_init_data_ver > 0)
+               DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+       else
+               DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
 /**
  * amdgpu_virt_wait_reset() - wait for reset gpu completed
  * @amdgpu:    amdgpu device.
@@ -287,3 +307,82 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                }
        }
 }
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+       uint32_t reg;
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+               break;
+       case CHIP_VEGA10:
+       case CHIP_VEGA20:
+       case CHIP_NAVI10:
+       case CHIP_NAVI12:
+       case CHIP_ARCTURUS:
+               reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+               break;
+       default: /* other chip doesn't support SRIOV */
+               reg = 0;
+               break;
+       }
+
+       if (reg & 1)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+       if (reg & 0x80000000)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+       if (!reg) {
+               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
+                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+       }
+}
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+       return amdgpu_sriov_is_debug(adev) ? true : false;
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+       return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+       if (!amdgpu_sriov_vf(adev) ||
+           amdgpu_virt_access_debugfs_is_kiq(adev))
+               return 0;
+
+       if (amdgpu_virt_access_debugfs_is_mmio(adev))
+               adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+       else
+               return -EPERM;
+
+       return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+       if (amdgpu_sriov_vf(adev))
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+       enum amdgpu_sriov_vf_mode mode;
+
+       if (amdgpu_sriov_vf(adev)) {
+               if (amdgpu_sriov_is_pp_one_vf(adev))
+                       mode = SRIOV_VF_MODE_ONE_VF;
+               else
+                       mode = SRIOV_VF_MODE_MULTI_VF;
+       } else {
+               mode = SRIOV_VF_MODE_BARE_METAL;
+       }
+
+       return mode;
+}
index f0128f745bd2847c2b7aaee89a3ee7a12a4a2271..b90e822cebd767684df71b9a74788879e9cb4146 100644 (file)
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+       SRIOV_VF_MODE_BARE_METAL = 0,
+       SRIOV_VF_MODE_ONE_VF,
+       SRIOV_VF_MODE_MULTI_VF,
+};
+
 struct amdgpu_mm_table {
        struct amdgpu_bo        *bo;
        uint32_t                *cpu_addr;
@@ -54,6 +65,7 @@ struct amdgpu_vf_error_buffer {
 struct amdgpu_virt_ops {
        int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
        int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+       int (*req_init_data)(struct amdgpu_device *adev);
        int (*reset_gpu)(struct amdgpu_device *adev);
        int (*wait_reset)(struct amdgpu_device *adev);
        void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +95,8 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
        /* VRAM LOST by GIM */
        AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+       /* MM bandwidth */
+       AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
        /* PP ONE VF MODE in GIM */
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
@@ -256,6 +270,8 @@ struct amdgpu_virt {
        struct amdgpu_virt_fw_reserve   fw_reserve;
        uint32_t gim_feature;
        uint32_t reg_access_mode;
+       int req_init_data_ver;
+       bool tdr_debug;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -287,6 +303,10 @@ static inline bool is_virtual_machine(void)
 
 #define amdgpu_sriov_is_pp_one_vf(adev) \
        ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+       ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+       ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
 
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +316,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +324,11 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
 #endif
index 6d9252a27916d1ee5f887846e79427cd3ed9b9a2..414a0b1c2e5a87e942b8d0edb6878528fc322665 100644 (file)
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
        struct dma_fence_cb cb;
 };
 
-/**
+/*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
  * an MMU notifier runs in reclaim-FS context.
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
  * @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
  *
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo,
-                             bool direct)
+                             bool immediate)
 {
        struct ttm_operation_ctx ctx = { true, false };
        unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
 
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @vm: requesting vm
  * @level: the page table level
- * @direct: use a direct update
  * @immediate: use an immediate update
  * @bp: resulting BO allocation parameters
  */
 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                              int level, bool direct,
+                              int level, bool immediate,
                               struct amdgpu_bo_param *bp)
 {
        memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
                bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
        bp->type = ttm_bo_type_kernel;
-       bp->no_wait_gpu = direct;
+       bp->no_wait_gpu = immediate;
        if (vm->root.base.bo)
                bp->resv = vm->root.base.bo->tbo.base.resv;
 }
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @adev: amdgpu_device pointer
  * @vm: VM to allocate page tables for
  * @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Make sure a specific page table or directory is allocated.
  *
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *cursor,
-                              bool direct)
+                              bool immediate)
 {
        struct amdgpu_vm_pt *entry = cursor->entry;
        struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        if (entry->base.bo)
                return 0;
 
-       amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+       amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
 
        r = amdgpu_bo_create(adev, &bp, &pt);
        if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 
-       r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+       r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
        if (r)
                goto error_free_pt;
 
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
  *
  * Makes sure all directories are up to date.
  *
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  * 0 for success, error for failure.
  */
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                         struct amdgpu_vm *vm, bool direct)
+                         struct amdgpu_vm *vm, bool immediate)
 {
        struct amdgpu_vm_update_params params;
        int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
 
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
@@ -1446,20 +1446,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                uint64_t incr, entry_end, pe_start;
                struct amdgpu_bo *pt;
 
-               if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+               if (!params->unlocked) {
                        /* make sure that the page tables covering the
                         * address range are actually allocated
                         */
                        r = amdgpu_vm_alloc_pts(params->adev, params->vm,
-                                               &cursor, params->direct);
+                                               &cursor, params->immediate);
                        if (r)
                                return r;
                }
 
                shift = amdgpu_vm_level_shift(adev, cursor.level);
                parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
-               if (adev->asic_type < CHIP_VEGA10 &&
-                   (flags & AMDGPU_PTE_VALID)) {
+               if (params->unlocked) {
+                       /* Unlocked updates are only allowed on the leaves */
+                       if (amdgpu_vm_pt_descendant(adev, &cursor))
+                               continue;
+               } else if (adev->asic_type < CHIP_VEGA10 &&
+                          (flags & AMDGPU_PTE_VALID)) {
                        /* No huge page support before GMC v9 */
                        if (cursor.level != AMDGPU_VM_PTB) {
                                if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1557,7 +1561,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
  * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
@@ -1572,8 +1577,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-                                      struct amdgpu_vm *vm, bool direct,
-                                      struct dma_resv *resv,
+                                      struct amdgpu_vm *vm, bool immediate,
+                                      bool unlocked, struct dma_resv *resv,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       dma_addr_t *pages_addr,
@@ -1586,8 +1591,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
        params.pages_addr = pages_addr;
+       params.unlocked = unlocked;
 
        /* Implicitly sync to command submissions in the same VM before
         * unmapping. Sync to moving fences before mapping.
@@ -1603,11 +1609,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                goto error_unlock;
        }
 
-       if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
-               struct amdgpu_bo *root = vm->root.base.bo;
+       if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+               struct dma_fence *tmp = dma_fence_get_stub();
 
-               if (!dma_fence_is_signaled(vm->last_direct))
-                       amdgpu_bo_fence(root, vm->last_direct, true);
+               amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+               swap(vm->last_unlocked, tmp);
+               dma_fence_put(tmp);
        }
 
        r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -1721,7 +1728,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                }
 
                last = min((uint64_t)mapping->last, start + max_entries - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
                                                start, last, flags, addr,
                                                dma_addr, fence);
                if (r)
@@ -1784,6 +1791,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 
        if (bo) {
                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+               if (amdgpu_bo_encrypted(bo))
+                       flags |= AMDGPU_PTE_TMZ;
+
                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        } else {
                flags = 0x0;
@@ -2014,7 +2025,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                    mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
                                                mapping->start, mapping->last,
                                                init_pte_value, 0, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2124,11 +2135,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
            (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
                bo_va->is_xgmi = true;
-               mutex_lock(&adev->vm_manager.lock_pstate);
                /* Power up XGMI if it can be potentially used */
-               if (++adev->vm_manager.xgmi_map_counter == 1)
-                       amdgpu_xgmi_set_pstate(adev, 1);
-               mutex_unlock(&adev->vm_manager.lock_pstate);
+               amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
        }
 
        return bo_va;
@@ -2551,12 +2559,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
        dma_fence_put(bo_va->last_pt_update);
 
-       if (bo && bo_va->is_xgmi) {
-               mutex_lock(&adev->vm_manager.lock_pstate);
-               if (--adev->vm_manager.xgmi_map_counter == 0)
-                       amdgpu_xgmi_set_pstate(adev, 0);
-               mutex_unlock(&adev->vm_manager.lock_pstate);
-       }
+       if (bo && bo_va->is_xgmi)
+               amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
 
        kfree(bo_va);
 }
@@ -2585,7 +2589,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return false;
 
        /* Don't evict VM page tables while they are updated */
-       if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+       if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
                amdgpu_vm_eviction_unlock(bo_base->vm);
                return false;
        }
@@ -2762,7 +2766,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
        if (timeout <= 0)
                return timeout;
 
-       return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+       return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
 }
 
 /**
@@ -2798,7 +2802,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 
        /* create scheduler entities for page table updates */
-       r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+       r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
@@ -2808,7 +2812,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
-               goto error_free_direct;
+               goto error_free_immediate;
 
        vm->pte_support_ats = false;
        vm->is_compute_context = false;
@@ -2834,7 +2838,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        else
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        vm->last_update = NULL;
-       vm->last_direct = dma_fence_get_stub();
+       vm->last_unlocked = dma_fence_get_stub();
 
        mutex_init(&vm->eviction_lock);
        vm->evicting = false;
@@ -2888,11 +2892,11 @@ error_free_root:
        vm->root.base.bo = NULL;
 
 error_free_delayed:
-       dma_fence_put(vm->last_direct);
+       dma_fence_put(vm->last_unlocked);
        drm_sched_entity_destroy(&vm->delayed);
 
-error_free_direct:
-       drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+       drm_sched_entity_destroy(&vm->immediate);
 
        return r;
 }
@@ -3089,8 +3093,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                vm->pasid = 0;
        }
 
-       dma_fence_wait(vm->last_direct, false);
-       dma_fence_put(vm->last_direct);
+       dma_fence_wait(vm->last_unlocked, false);
+       dma_fence_put(vm->last_unlocked);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3107,7 +3111,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        amdgpu_bo_unref(&root);
        WARN_ON(vm->root.base.bo);
 
-       drm_sched_entity_destroy(&vm->direct);
+       drm_sched_entity_destroy(&vm->immediate);
        drm_sched_entity_destroy(&vm->delayed);
 
        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3166,9 +3170,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 
        idr_init(&adev->vm_manager.pasid_idr);
        spin_lock_init(&adev->vm_manager.pasid_lock);
-
-       adev->vm_manager.xgmi_map_counter = 0;
-       mutex_init(&adev->vm_manager.lock_pstate);
 }
 
 /**
@@ -3343,8 +3344,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
                value = 0;
        }
 
-       r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
-                                       flags, value, NULL, NULL);
+       r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
+                                       addr + 1, flags, value, NULL, NULL);
        if (r)
                goto error_unlock;
 
index 06fe30e1492d69e66d15233af1fa168be406c8b2..c8e68d7890bff92c3efdcb7a9d04e340bc950c6e 100644 (file)
@@ -54,6 +54,9 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_PTE_SYSTEM      (1ULL << 1)
 #define AMDGPU_PTE_SNOOPED     (1ULL << 2)
 
+/* RV+ */
+#define AMDGPU_PTE_TMZ         (1ULL << 3)
+
 /* VI only */
 #define AMDGPU_PTE_EXECUTABLE  (1ULL << 4)
 
@@ -203,9 +206,14 @@ struct amdgpu_vm_update_params {
        struct amdgpu_vm *vm;
 
        /**
-        * @direct: if changes should be made directly
+        * @immediate: if changes should be made immediately
         */
-       bool direct;
+       bool immediate;
+
+       /**
+        * @unlocked: true if the root BO is not locked
+        */
+       bool unlocked;
 
        /**
         * @pages_addr:
@@ -271,11 +279,11 @@ struct amdgpu_vm {
        struct dma_fence        *last_update;
 
        /* Scheduler entities for page table updates */
-       struct drm_sched_entity direct;
+       struct drm_sched_entity immediate;
        struct drm_sched_entity delayed;
 
-       /* Last submission to the scheduler entities */
-       struct dma_fence        *last_direct;
+       /* Last unlocked submission to the scheduler entities */
+       struct dma_fence        *last_unlocked;
 
        unsigned int            pasid;
        /* dedicated to vm */
@@ -349,10 +357,6 @@ struct amdgpu_vm_manager {
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;
-
-       /* counter of mapped memory through xgmi */
-       uint32_t                                xgmi_map_counter;
-       struct mutex                            lock_pstate;
 };
 
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -380,7 +384,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                         struct amdgpu_vm *vm, bool direct);
+                         struct amdgpu_vm *vm, bool immediate);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
index e3851630407097ad74fdd38d58bc4b8806474ce1..39c704a1fb0e556d832ff42f748a692f42e71364 100644 (file)
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
        pe += (unsigned long)amdgpu_bo_kptr(bo);
 
-       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
        for (i = 0; i < count; i++) {
                value = p->pages_addr ?
index cf96c335b258b479c37e10fadad3739a51862ad2..8d9c6feba660b58c9fd6fd6aa37a8b69831364fd 100644 (file)
@@ -61,10 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                  struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode)
 {
+       enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+               : AMDGPU_IB_POOL_DELAYED;
        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
        int r;
 
-       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
        if (r)
                return r;
 
@@ -90,11 +92,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 {
        struct amdgpu_ib *ib = p->job->ibs;
        struct drm_sched_entity *entity;
-       struct dma_fence *f, *tmp;
        struct amdgpu_ring *ring;
+       struct dma_fence *f;
        int r;
 
-       entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+       entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
        ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
 
        WARN_ON(ib->length_dw == 0);
@@ -104,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        if (r)
                goto error;
 
-       if (p->direct) {
-               tmp = dma_fence_get(f);
-               swap(p->vm->last_direct, tmp);
+       if (p->unlocked) {
+               struct dma_fence *tmp = dma_fence_get(f);
+
+               swap(p->vm->last_unlocked, f);
                dma_fence_put(tmp);
        } else {
-               dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+               amdgpu_bo_fence(p->vm->root.base.bo, f, true);
        }
 
-       if (fence && !p->direct)
+       if (fence && !p->immediate)
                swap(*fence, f);
        dma_fence_put(f);
        return 0;
@@ -142,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
        src += p->num_dw_left * 4;
 
        pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-       trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+       trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
 
        amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
 }
@@ -169,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
        struct amdgpu_ib *ib = p->job->ibs;
 
        pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
        if (count < 3) {
                amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
                                    count, incr);
@@ -198,6 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                 uint64_t addr, unsigned count, uint32_t incr,
                                 uint64_t flags)
 {
+       enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+               : AMDGPU_IB_POOL_DELAYED;
        unsigned int i, ndw, nptes;
        uint64_t *pte;
        int r;
@@ -223,7 +228,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+                                                    &p->job);
                        if (r)
                                return r;
 
index 82a3299e53c042f6c8a5f8e688e0f36a15ca60e4..d399e58931705c9da41e1361be76041adb947ad2 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Christian König
  */
 
+#include <linux/dma-mapping.h>
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_atomfirmware.h"
@@ -148,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
                   amdgpu_mem_info_vram_vendor, NULL);
 
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+       &dev_attr_mem_info_vram_total.attr,
+       &dev_attr_mem_info_vis_vram_total.attr,
+       &dev_attr_mem_info_vram_used.attr,
+       &dev_attr_mem_info_vis_vram_used.attr,
+       &dev_attr_mem_info_vram_vendor.attr,
+       NULL
+};
+
 /**
  * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
  *
@@ -172,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
        man->priv = mgr;
 
        /* Add the two VRAM-related sysfs files */
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_total\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_used\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
-               return ret;
-       }
+       ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+       if (ret)
+               DRM_ERROR("Failed to register sysfs\n");
 
        return 0;
 }
@@ -219,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
        spin_unlock(&mgr->lock);
        kfree(mgr);
        man->priv = NULL;
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+       sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
        return 0;
 }
 
@@ -458,6 +442,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        mem->mm_node = NULL;
 }
 
+/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: the other device
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt)
+{
+       struct drm_mm_node *node;
+       struct scatterlist *sg;
+       int num_entries = 0;
+       unsigned int pages;
+       int i, r;
+
+       *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+       if (!*sgt)
+               return -ENOMEM;
+
+       for (pages = mem->num_pages, node = mem->mm_node;
+            pages; pages -= node->size, ++node)
+               ++num_entries;
+
+       r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+       if (r)
+               goto error_free;
+
+       for_each_sg((*sgt)->sgl, sg, num_entries, i)
+               sg->length = 0;
+
+       node = mem->mm_node;
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               phys_addr_t phys = (node->start << PAGE_SHIFT) +
+                       adev->gmc.aper_base;
+               size_t size = node->size << PAGE_SHIFT;
+               dma_addr_t addr;
+
+               ++node;
+               addr = dma_map_resource(dev, phys, size, dir,
+                                       DMA_ATTR_SKIP_CPU_SYNC);
+               r = dma_mapping_error(dev, addr);
+               if (r)
+                       goto error_unmap;
+
+               sg_set_page(sg, NULL, size, 0);
+               sg_dma_address(sg) = addr;
+               sg_dma_len(sg) = size;
+       }
+       return 0;
+
+error_unmap:
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               if (!sg->length)
+                       continue;
+
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       }
+       sg_free_table(*sgt);
+
+error_free:
+       kfree(*sgt);
+       return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: device the sg table was mapped for
+ * @dir: dma direction used when mapping
+ * @sgt: sg table to free
+ *
+ * Free a previously allocated sg table and unmap its DMA mappings.
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
 /**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
index 95b3327168acb0354b4e03c09cfa3be041fe2f8a..e9e59bc68c9f25d92c1f0e890aa52f4825129cb8 100644 (file)
@@ -373,7 +373,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 
        if (lock)
                mutex_lock(&tmp->hive_lock);
-       tmp->pstate = -1;
+       tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+       tmp->hi_req_gpu = NULL;
+       /*
+        * hive pstate on boot is high in vega20, so we have to go to low
+        * pstate after boot.
+        */
+       tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
        mutex_unlock(&xgmi_mutex);
 
        return tmp;
@@ -383,56 +389,59 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 {
        int ret = 0;
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
-       struct amdgpu_device *tmp_adev;
-       bool update_hive_pstate = true;
-       bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+       struct amdgpu_device *request_adev = hive->hi_req_gpu ?
+                                               hive->hi_req_gpu : adev;
+       bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+       bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
 
-       if (!hive)
+       /* fw bug so temporarily disable pstate switching */
+       return 0;
+
+       if (!hive || adev->asic_type != CHIP_VEGA20)
                return 0;
 
        mutex_lock(&hive->hive_lock);
 
-       if (hive->pstate == pstate) {
-               adev->pstate = is_high_pstate ? pstate : adev->pstate;
+       if (is_hi_req)
+               hive->hi_req_count++;
+       else
+               hive->hi_req_count--;
+
+       /*
+        * Vega20 only needs a single peer to request pstate high for the hive
+        * to go high, but all peers must request pstate low for the hive to go low.
+        */
+       if (hive->pstate == pstate ||
+                       (!is_hi_req && hive->hi_req_count && !init_low))
                goto out;
-       }
 
-       dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+       dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
 
-       ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+       ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
        if (ret) {
-               dev_err(adev->dev,
+               dev_err(request_adev->dev,
                        "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
-                       adev->gmc.xgmi.node_id,
-                       adev->gmc.xgmi.hive_id, ret);
+                       request_adev->gmc.xgmi.node_id,
+                       request_adev->gmc.xgmi.hive_id, ret);
                goto out;
        }
 
-       /* Update device pstate */
-       adev->pstate = pstate;
-
-       /*
-        * Update the hive pstate only all devices of the hive
-        * are in the same pstate
-        */
-       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-               if (tmp_adev->pstate != adev->pstate) {
-                       update_hive_pstate = false;
-                       break;
-               }
-       }
-       if (update_hive_pstate || is_high_pstate)
+       if (init_low)
+               hive->pstate = hive->hi_req_count ?
+                                       hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+       else {
                hive->pstate = pstate;
-
+               hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+                                                       adev : NULL;
+       }
 out:
        mutex_unlock(&hive->hive_lock);
-
        return ret;
 }
 
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
 {
-       int ret = -EINVAL;
+       int ret;
 
        /* Each psp need to set the latest topology */
        ret = psp_xgmi_set_topology_info(&adev->psp,
@@ -507,9 +516,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                goto exit;
        }
 
-       /* Set default device pstate */
-       adev->pstate = -1;
-
        top_info = &adev->psp.xgmi_context.top_info;
 
        list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -604,6 +610,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
+       amdgpu_xgmi_reset_ras_error_count(adev);
+
        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
                if (!adev->gmc.xgmi.ras_if)
@@ -641,31 +649,34 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr)
 {
-       uint32_t df_inst_id;
-       uint64_t dram_base_addr = 0;
-       const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
-
-       if ((!df_funcs)                 ||
-           (!df_funcs->get_df_inst_id) ||
-           (!df_funcs->get_dram_base_addr)) {
-               dev_warn(adev->dev,
-                        "XGMI: relative phy_addr algorithm is not supported\n");
-               return addr;
-       }
-
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
-               dev_warn(adev->dev,
-                        "failed to disable DF-Cstate, DF register may not be accessible\n");
-               return addr;
-       }
+       struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+       return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
 
-       df_inst_id = df_funcs->get_df_inst_id(adev);
-       dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+       WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+       WREG32_PCIE(pcs_status_reg, 0);
+}
 
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
-               dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+       uint32_t i;
 
-       return addr + dram_base_addr;
+       switch (adev->asic_type) {
+       case CHIP_ARCTURUS:
+               for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+                       pcs_clear_status(adev,
+                                        xgmi_pcs_err_status_reg_arct[i]);
+               break;
+       case CHIP_VEGA20:
+               for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+                       pcs_clear_status(adev,
+                                        xgmi_pcs_err_status_reg_vg20[i]);
+               break;
+       default:
+               break;
+       }
 }
 
 static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -758,6 +769,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
+       amdgpu_xgmi_reset_ras_error_count(adev);
+
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
 
index 4a92067fe595985609fc4f5b29a4a06704631728..6999eab16a72090c184736d432ffc857e4a935d6 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
+
 struct amdgpu_hive_info {
        uint64_t                hive_id;
        struct list_head        device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
        struct kobject *kobj;
        struct device_attribute dev_attr;
        struct amdgpu_device *adev;
-       int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+       int hi_req_count;
+       struct amdgpu_device *hi_req_gpu;
        struct task_barrier tb;
+       enum {
+               AMDGPU_XGMI_PSTATE_MIN,
+               AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+               AMDGPU_XGMI_PSTATE_UNKNOWN
+       } pstate;
 };
 
 struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr);
 int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                      void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
 
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                struct amdgpu_device *bo_adev)
index cae426c7c0863b06c7c50fcbefae95ce37262ba0..4cfc786699c7fcaac2b903c463ea85bbd9379812 100644 (file)
@@ -54,6 +54,8 @@
 #define PLL_INDEX      2
 #define PLL_DATA       3
 
+#define ATOM_CMD_TIMEOUT_SEC   20
+
 typedef struct {
        struct atom_context *ctx;
        uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
                        cjiffies = jiffies;
                        if (time_after(cjiffies, ctx->last_jump_jiffies)) {
                                cjiffies -= ctx->last_jump_jiffies;
-                               if ((jiffies_to_msecs(cjiffies) > 10000)) {
-                                       DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+                               if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
+                                       DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
+                                                 ATOM_CMD_TIMEOUT_SEC);
                                        ctx->abort = true;
                                }
                        } else {
index 006f21ef7ddf09a375ead24f7b31af1fa41fd207..fe306d0f73f7a14a521183f667360d3d56f027cc 100644 (file)
@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
        int r;
 
        if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-               if (!adev->in_suspend)
-                       amdgpu_inc_vram_lost(adev);
                r = amdgpu_dpm_baco_reset(adev);
        } else {
                r = cik_asic_pci_config_reset(adev);
@@ -1811,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
                >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2179,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
 
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-       cik_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
index 580d3f93d67093ec1925e671d8fea7d2a6f5e2ac..20f108818b2b966f5893f05e7b72e233d0b3054a 100644 (file)
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1313,7 +1313,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
                                      uint64_t src_offset,
                                      uint64_t dst_offset,
-                                     uint32_t byte_count)
+                                     uint32_t byte_count,
+                                     bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = byte_count;
index cee6e8a3ad9c9227c81dcfe2f568daf508686219..5f3f6ebfb3876222e69804bba5ffdc899c35288d 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
index 2512e7ebfedf2ef00be981a5528b5981dba555ec..e38744d06f4e51c390479f11c7a0eb65ca98a781 100644 (file)
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
index 0dde22db9848690f1e50c3be0e819ed03151aece..2584ff74423bc391e9971a514ef641ec7185c13d 100644 (file)
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
index 84219534bd38c6ac3f73749b977605c7b161b593..d05c39f9ae40ebda7975c507d21dd9d00c02764e 100644 (file)
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 
 }
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  CUR_CONTROL__CURSOR_EN_MASK |
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              CUR_CONTROL__CURSOR_EN_MASK |
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 }
 
index 3a640702d7d193ad44bb216fe7ace2b0507df236..ad0f8adb6a2b61d7c06674db2c18063b3ed480ce 100644 (file)
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
 
 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  CUR_CONTROL__CURSOR_EN_MASK |
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              CUR_CONTROL__CURSOR_EN_MASK |
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
 
 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
index 13e12be667fc2a7148a231cdbec3559410daa864..d5ff7b6331ff98c7636cf04ad31be92e0f4b5a8c 100644 (file)
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       drm_crtc_vblank_off(crtc);
 
+       amdgpu_crtc->enabled = false;
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
        static const struct mode_size {
                int w;
                int h;
-       } common_modes[17] = {
+       } common_modes[21] = {
                { 640,  480},
                { 720,  480},
                { 800,  600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
                {1680, 1050},
                {1600, 1200},
                {1920, 1080},
-               {1920, 1200}
+               {1920, 1200},
+               {4096, 3112},
+               {3656, 2664},
+               {3840, 2160},
+               {4096, 2160},
        };
 
-       for (i = 0; i < 17; i++) {
+       for (i = 0; i < 21; i++) {
                mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
                drm_mode_probed_add(connector, mode);
        }
index 5a1bd8ed1a6c51ab34d0d65f834c3d5a8ccd666a..a7b8292cefee2998fe3d03068a1eb3e20aa6408f 100644 (file)
@@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
        }
 }
 
-static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
-                                          uint32_t df_inst)
-{
-       uint32_t base_addr_reg_val      = 0;
-       uint64_t base_addr              = 0;
-
-       base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
-                                       df_inst * DF_3_6_SMN_REG_INST_DIST);
-
-       if (REG_GET_FIELD(base_addr_reg_val,
-                         DF_CS_UMC_AON0_DramBaseAddress0,
-                         AddrRngVal) == 0) {
-               DRM_WARN("address range not valid");
-               return 0;
-       }
-
-       base_addr = REG_GET_FIELD(base_addr_reg_val,
-                                 DF_CS_UMC_AON0_DramBaseAddress0,
-                                 DramBaseAddr);
-
-       return base_addr << 28;
-}
-
-static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
-{
-       uint32_t xgmi_node_id   = 0;
-       uint32_t df_inst_id     = 0;
-
-       /* Walk through DF dst nodes to find current XGMI node */
-       for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
-
-               xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
-                                          df_inst_id * DF_3_6_SMN_REG_INST_DIST);
-               xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
-                                            DF_CS_UMC_AON0_DramLimitAddress0,
-                                            DstFabricID);
-
-               /* TODO: establish reason dest fabric id is offset by 7 */
-               xgmi_node_id = xgmi_node_id >> 7;
-
-               if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
-                       break;
-       }
-
-       if (df_inst_id == DF_3_6_INST_CNT) {
-               DRM_WARN("cant match df dst id with gpu node");
-               return 0;
-       }
-
-       return df_inst_id;
-}
-
 const struct amdgpu_df_funcs df_v3_6_funcs = {
        .sw_init = df_v3_6_sw_init,
        .sw_fini = df_v3_6_sw_fini,
@@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
        .pmc_get_count = df_v3_6_pmc_get_count,
        .get_fica = df_v3_6_get_fica,
        .set_fica = df_v3_6_set_fica,
-       .get_dram_base_addr = df_v3_6_get_dram_base_addr,
-       .get_df_inst_id = df_v3_6_get_df_inst_id
 };
index d78059fd2c72560d14392949e4fbc434cb82787b..bd5dd4f6431103b8fded2463a37717870da6f6f8 100644 (file)
@@ -138,6 +138,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
        /* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,14 +1328,1694 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
        /* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 /* Golden register settings for GC 10.1.2 (Navi12). Intentionally empty:
  * validated values are not yet available, so init applies no overrides
  * for this ASIC revision.
  */
 static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
 {
        /* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
-        (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+        (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
         (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
         (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
 
@@ -301,7 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_0_nv10,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_0_nv10,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
                break;
        case CHIP_NAVI14:
                soc15_program_register_sequence(adev,
@@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_nv14,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_1_nv14,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
                break;
        case CHIP_NAVI12:
                soc15_program_register_sequence(adev,
@@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_2_nv12,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_1_2_nv12,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
                break;
        default:
                break;
@@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 
        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
        return 0;
@@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 {
        int r;
        unsigned irq_type;
-       struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       struct amdgpu_ring *ring;
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
-
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -1829,9 +4578,9 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 
        /* csib */
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
-                    adev->gfx.rlc.clear_state_gpu_addr >> 32);
+                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
-                    adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+                        adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
 
        return 0;
@@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-       if (!enable) {
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
        WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
 
        for (i = 0; i < adev->usec_timeout; i++) {
@@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
                             (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
                              CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -3802,14 +6540,16 @@ static int gfx_v10_0_hw_init(void *handle)
                 * loaded firstly, so in direct type, it has to load smc ucode
                 * here before rlc.
                 */
-               r = smu_load_microcode(&adev->smu);
-               if (r)
-                       return r;
+               if (adev->smu.ppt_funcs != NULL) {
+                       r = smu_load_microcode(&adev->smu);
+                       if (r)
+                               return r;
 
-               r = smu_check_fw_status(&adev->smu);
-               if (r) {
-                       pr_err("SMC firmware status is not correct\n");
-                       return r;
+                       r = smu_check_fw_status(&adev->smu);
+                       if (r) {
+                               pr_err("SMC firmware status is not correct\n");
+                               return r;
+                       }
                }
        }
 
@@ -4273,7 +7013,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                /* ===  CGCG /CGLS for GFX 3D Only === */
                gfx_v10_0_update_3d_clock_gating(adev, enable);
                /* ===  MGCG + MGLS === */
-               /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+               gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
        }
 
        if (adev->cg_flags &
@@ -4292,14 +7032,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
-       u32 data;
+       u32 reg, data;
 
-       data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+       reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(reg);
+       else
+               data = RREG32(reg);
 
        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
 
-       WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       else
+               WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -4341,6 +7088,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
        .reset = gfx_v10_0_rlc_reset,
        .start = gfx_v10_0_rlc_start,
        .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
+       .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v10_0_set_safe_mode,
+       .unset_safe_mode = gfx_v10_0_unset_safe_mode,
+       .init = gfx_v10_0_rlc_init,
+       .get_csb_size = gfx_v10_0_get_csb_size,
+       .get_csb_buffer = gfx_v10_0_get_csb_buffer,
+       .resume = gfx_v10_0_rlc_resume,
+       .stop = gfx_v10_0_rlc_stop,
+       .reset = gfx_v10_0_rlc_reset,
+       .start = gfx_v10_0_rlc_start,
+       .update_spm_vmid = gfx_v10_0_update_spm_vmid,
        .rlcg_wreg = gfx_v10_rlcg_wreg,
        .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
@@ -4350,14 +7111,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_PG_STATE_GATE);
+
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-               if (!enable) {
-                       amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               } else
-                       amdgpu_gfx_off_ctrl(adev, true);
+               amdgpu_gfx_off_ctrl(adev, enable);
                break;
        default:
                break;
@@ -4370,6 +7131,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
@@ -4682,7 +7446,8 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0);
 }
 
-static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
+                                        uint32_t flags)
 {
        uint32_t dw2 = 0;
 
@@ -4690,8 +7455,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
                gfx_v10_0_ring_emit_ce_meta(ring,
                                    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
-       gfx_v10_0_ring_emit_tmz(ring, true);
-
        dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                /* set load_global_config & load_global_uconfig */
@@ -4848,16 +7611,19 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
                                           sizeof(de_payload) >> 2);
 }
 
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                   bool secure)
 {
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-       amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
 
-static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                    uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -4866,9 +7632,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -4918,6 +7684,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                                                           ref, mask);
 }
 
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+                                        unsigned vmid)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t value = 0;
+
+       value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+       value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+       value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+       value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+       WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                      uint32_t me, uint32_t pipe,
@@ -5241,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int gcr_cntl =
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+       /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+       amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+       amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
        .name = "gfx_v10_0",
        .early_init = gfx_v10_0_early_init,
@@ -5288,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               8, /* gfx_v10_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5305,10 +8108,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
        .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
        .preempt_ib = gfx_v10_0_ring_preempt_ib,
-       .emit_tmz = gfx_v10_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+       .soft_recovery = gfx_v10_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5328,7 +8133,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v10_0_ring_emit_vm_flush */
-               8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+               8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+               8, /* gfx_v10_0_emit_mem_sync */
        .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
        .emit_ib = gfx_v10_0_ring_emit_ib_compute,
        .emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5343,6 +8149,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+       .emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5429,9 +8236,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-       case CHIP_NAVI12:
                adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
                break;
+       case CHIP_NAVI12:
+               adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
+               break;
        default:
                break;
        }
index 31f44d05e606d1e367da11ff176b4279c81dc2a4..79c52c7a02e3a0bd7779c136beafa591daa0cdaf 100644 (file)
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1950,7 +1951,6 @@ err1:
 
 static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        if (enable) {
                WREG32(mmCP_ME_CNTL, 0);
        } else {
@@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                                      CP_ME_CNTL__PFP_HALT_MASK |
                                      CP_ME_CNTL__CE_HALT_MASK));
                WREG32(mmSCRATCH_UMSK, 0);
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
                sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, irq_type);
+                                    &adev->gfx.eop_irq, irq_type,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -3466,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
        .name = "gfx_v6_0",
        .early_init = gfx_v6_0_early_init,
@@ -3496,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
                14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
                7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-               3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
        .emit_ib = gfx_v6_0_ring_emit_ib,
        .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3507,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
        .insert_nop = amdgpu_ring_insert_nop,
        .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
        .emit_wreg = gfx_v6_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3520,7 +3533,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
                5 + 5 + /* hdp flush / invalidate */
                7 + /* gfx_v6_0_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
-               14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
        .emit_ib = gfx_v6_0_ring_emit_ib,
        .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3530,6 +3544,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
        .test_ib = gfx_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .emit_wreg = gfx_v6_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
index 733d398c61ccb7b6b94c4b4384f4fd4f6f1d4ddb..0cc011f9190d3eaabac0adc40f6883cff9462724 100644 (file)
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -2431,15 +2432,12 @@ err1:
  */
 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
-       if (enable) {
+       if (enable)
                WREG32(mmCP_ME_CNTL, 0);
-       } else {
-               WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
+       else
+               WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+                                     CP_ME_CNTL__PFP_HALT_MASK |
+                                     CP_ME_CNTL__CE_HALT_MASK));
        udelay(50);
 }
 
@@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
  */
 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
-       if (enable) {
+       if (enable)
                WREG32(mmCP_MEC_CNTL, 0);
-       } else {
-               WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
-       }
+       else
+               WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+                                      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
        udelay(50);
 }
 
@@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                       &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -5001,6 +4998,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);    /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xff);          /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A);    /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .name = "gfx_v7_0",
        .early_init = gfx_v7_0_early_init,
@@ -5033,7 +5056,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
                12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
                7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-               3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5048,6 +5072,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
        .emit_wreg = gfx_v7_0_ring_emit_wreg,
        .soft_recovery = gfx_v7_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v7_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5064,7 +5089,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
                5 + /* hdp invalidate */
                7 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
-               7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7, /* gfx_v7_0_emit_mem_sync_compute */
        .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
@@ -5077,6 +5103,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v7_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
index fc32586ef80b1a5c91117b5f469a17826f349408..1d4128227ffd6d1fda53755e91b5bf39c5490fe0 100644 (file)
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
@@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
 
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                       &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
                }
 
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
-                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        u32 tmp = RREG32(mmCP_ME_CNTL);
 
        if (enable) {
@@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32(mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32(mmCP_MEC_CNTL, 0);
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
        u32 data;
 
-       data = RREG32(mmRLC_SPM_VMID);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
+       else
+               data = RREG32(mmRLC_SPM_VMID);
 
        data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
 
-       WREG32(mmRLC_SPM_VMID, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
+       else
+               WREG32(mmRLC_SPM_VMID, data);
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
                ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
 }
 
-static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                   uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6815,6 +6817,34 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA |
+                         PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA |
+                         PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);    /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xff);          /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A);    /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
        .name = "gfx_v8_0",
        .early_init = gfx_v8_0_early_init,
@@ -6861,7 +6891,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                12 + 12 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6879,6 +6910,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
        .soft_recovery = gfx_v8_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v8_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6895,7 +6927,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
-               7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7, /* gfx_v8_0_emit_mem_sync_compute */
        .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6908,6 +6941,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
index e6b113ed2f409f718dd3d3ac22fb2172be290564..1573ac1f03b246d999629549a7af64b51371324f 100644 (file)
 
 #include "gfx_v9_4.h"
 
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
 
-#define mmPWR_MISC_CNTL_STATUS                                 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT       0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT         0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK         0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK           0x00000006L
-
 #define mmGCEA_PROBE_MAP                        0x070c
 #define mmGCEA_PROBE_MAP_BASE_IDX               0
 
@@ -511,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1234,6 +1231,10 @@ struct amdgpu_gfxoff_quirk {
 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+       { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+       /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+       { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
        { 0, 0, 0, 0, 0 },
 };
 
@@ -2195,6 +2196,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -2213,10 +2215,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
-
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -2310,7 +2313,9 @@ static int gfx_v9_0_sw_init(void *handle)
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -2528,7 +2533,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
                break;
        default:
                break;
-       };
+       }
 }
 
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -3100,16 +3105,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
 
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-       if (!enable) {
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
        WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
 }
@@ -3305,15 +3305,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -3383,11 +3379,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -4054,13 +4047,18 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
 {
        signed long r, cnt = 0;
        unsigned long flags;
-       uint32_t seq;
+       uint32_t seq, reg_val_offs = 0;
+       uint64_t value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
+       if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+               pr_err("critical bug! too many kiq readers\n");
+               goto failed_unlock;
+       }
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 9 |     /* src: register*/
@@ -4070,10 +4068,13 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
-       amdgpu_fence_emit_polling(ring, &seq);
+                               reg_val_offs * 4));
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -4099,10 +4100,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;
 
-       return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
-               (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+       mb();
+       value = (uint64_t)adev->wb.wb[reg_val_offs] |
+               (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
+       amdgpu_device_wb_free(adev, reg_val_offs);
+       return value;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+failed_unlock:
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_read:
+       if (reg_val_offs)
+               amdgpu_device_wb_free(adev, reg_val_offs);
        pr_err("failed to read gpu clock\n");
        return ~0;
 }
@@ -4487,7 +4497,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
@@ -4958,14 +4969,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 
 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
-       u32 data;
+       u32 reg, data;
 
-       data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+       reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(reg);
+       else
+               data = RREG32(reg);
 
        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
 
-       WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       else
+               WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -5023,10 +5041,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
        switch (adev->asic_type) {
        case CHIP_RAVEN:
        case CHIP_RENOIR:
-               if (!enable) {
+               if (!enable)
                        amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               }
+
                if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
                        gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
                        gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5050,12 +5067,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
                        amdgpu_gfx_off_ctrl(adev, true);
                break;
        case CHIP_VEGA12:
-               if (!enable) {
-                       amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               } else {
-                       amdgpu_gfx_off_ctrl(adev, true);
-               }
+               amdgpu_gfx_off_ctrl(adev, enable);
                break;
        default:
                break;
@@ -5426,10 +5438,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
        amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
 }
 
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                  bool secure)
 {
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-       amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
 
 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5439,8 +5454,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
        if (amdgpu_sriov_vf(ring->adev))
                gfx_v9_0_ring_emit_ce_meta(ring);
 
-       gfx_v9_0_ring_emit_tmz(ring, true);
-
        dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                /* set load_global_config & load_global_uconfig */
@@ -5491,10 +5504,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
                ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
 }
 
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                   uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -5503,9 +5516,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6406,15 +6419,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                vml2_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, vml2_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                vml2_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, vml2_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6426,16 +6439,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                vml2_walker_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, vml2_walker_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                vml2_walker_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, vml2_walker_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6446,8 +6459,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                atc_l2_cache_2m_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+                               sec_count);
                        err_data->ce_count += sec_count;
                }
        }
@@ -6458,15 +6472,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                atc_l2_cache_4k_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+                               sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = (data & 0x00018000L) >> 0xf;
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                atc_l2_cache_4k_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, atc_l2_cache_4k_mems[i],
+                               ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6479,7 +6495,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+       const struct soc15_reg_entry *reg,
        uint32_t se_id, uint32_t inst_id, uint32_t value,
        uint32_t *sec_count, uint32_t *ded_count)
 {
@@ -6496,7 +6513,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
                                gfx_v9_0_ras_fields[i].sec_count_mask) >>
                                gfx_v9_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+                       dev_info(adev->dev, "GFX SubBlock %s, "
+                               "Instance[%d][%d], SEC %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                sec_cnt);
@@ -6507,7 +6525,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
                                gfx_v9_0_ras_fields[i].ded_count_mask) >>
                                gfx_v9_0_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+                       dev_info(adev->dev, "GFX SubBlock %s, "
+                               "Instance[%d][%d], DED %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                ded_cnt);
@@ -6596,9 +6615,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                reg_value =
                                        RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                                if (reg_value)
-                                       gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
-                                                       j, k, reg_value,
-                                                       &sec_count, &ded_count);
+                                       gfx_v9_0_ras_error_count(adev,
+                                               &gfx_v9_0_edc_counter_regs[i],
+                                               j, k, reg_value,
+                                               &sec_count, &ded_count);
                        }
                }
        }
@@ -6614,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int cp_coher_cntl =
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+       /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
        .name = "gfx_v9_0",
        .early_init = gfx_v9_0_early_init,
@@ -6660,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               7, /* gfx_v9_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6676,11 +6716,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
        .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
-       .emit_tmz = gfx_v9_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v9_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6700,7 +6741,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v9_0_ring_emit_vm_flush */
-               8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+               8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+               7, /* gfx_v9_0_emit_mem_sync */
        .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_ib = gfx_v9_0_ring_emit_ib_compute,
        .emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6715,6 +6757,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+       .emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
old mode 100644 (file)
new mode 100755 (executable)
index dce945e..46351db
@@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 vml2_walker_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 vml2_walker_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 utcl2_router_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 utcl2_router_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 atc_l2_cache_2m_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 atc_l2_cache_2m_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 atc_l2_cache_4k_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 atc_l2_cache_4k_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
+                                   const struct soc15_reg_entry *reg,
                                    uint32_t se_id, uint32_t inst_id,
                                    uint32_t value, uint32_t *sec_count,
                                    uint32_t *ded_count)
@@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
                sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
                          gfx_v9_4_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+                       dev_info(adev->dev,
+                                "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
                                 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
                                 sec_cnt);
                        *sec_count += sec_cnt;
@@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
                ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
                          gfx_v9_4_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+                       dev_info(adev->dev,
+                                "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
                                 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
                                 ded_cnt);
                        *ded_count += ded_cnt;
@@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
                                reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
                                        gfx_v9_4_edc_counter_regs[i]));
                                if (reg_value)
-                                       gfx_v9_4_ras_error_count(
+                                       gfx_v9_4_ras_error_count(adev,
                                                &gfx_v9_4_edc_counter_regs[i],
                                                j, k, reg_value, &sec_count,
                                                &ded_count);
index 9775eca6fe434e044d21741eb5123ca16f2ec340..edaa50d850a6ade0ca3689ae195c868c71fa410a 100644 (file)
@@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                        dev_err(adev->dev,
                                "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
+                       dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+                               REG_GET_FIELD(status,
+                               GCVM_L2_PROTECTION_FAULT_STATUS, CID));
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -369,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
-       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+                                    &job);
        if (r)
                goto error_alloc;
 
@@ -423,7 +427,13 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
-               amdgpu_fence_emit_polling(ring, &seq);
+               r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+               if (r) {
+                       amdgpu_ring_undo(ring);
+                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       return -ETIME;
+               }
+
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
index b205039350b6c9d469ecdb3d738a71ea3cb40f17..a75e472b4a81ea8494eb4341b606b409d8cea594 100644 (file)
@@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 #define MC_SEQ_MISC0__MT__HBM    0x60000000
 #define MC_SEQ_MISC0__MT__DDR3   0xB0000000
 
-
-static const u32 crtc_offsets[6] =
-{
-       SI_CRTC0_REGISTER_OFFSET,
-       SI_CRTC1_REGISTER_OFFSET,
-       SI_CRTC2_REGISTER_OFFSET,
-       SI_CRTC3_REGISTER_OFFSET,
-       SI_CRTC4_REGISTER_OFFSET,
-       SI_CRTC5_REGISTER_OFFSET
-};
-
 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
 {
        u32 blackout;
@@ -858,7 +847,7 @@ static int gmc_v6_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
-               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+               dev_warn(adev->dev, "No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(44);
index 9da9596a36388d31e4c2cc5fb106df3c962dcd2f..bcd4baecfe115c15f4a35b5b7a313b1c199d2574 100644 (file)
@@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  *
  * Print human readable fault information (CIK).
  */
@@ -1019,7 +1020,7 @@ static int gmc_v7_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
        if (r) {
-               pr_warn("amdgpu: No suitable DMA available\n");
+               pr_warn("No suitable DMA available\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(40);
index 27d83204fa2b00c3a8e663ceb18402ce39ba24e0..26976e50e2a2917c2762bb9a6fe78b3b4a4e1744 100644 (file)
@@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  *
  * Print human readable fault information (VI).
  */
@@ -1144,7 +1145,7 @@ static int gmc_v8_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
        if (r) {
-               pr_warn("amdgpu: No suitable DMA available\n");
+               pr_warn("No suitable DMA available\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(40);
index 8606f877478f89b8fc7fbff1cf9bed83311a4197..055ecba754fff8b7f41700a76daef57102e2ba3b 100644 (file)
@@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                        dev_err(adev->dev,
                                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
+                       dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+                               REG_GET_FIELD(status,
+                               VM_L2_PROTECTION_FAULT_STATUS, CID));
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -618,7 +621,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                                      pasid, 2, all_hub);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
-               amdgpu_fence_emit_polling(ring, &seq);
+               r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+               if (r) {
+                       amdgpu_ring_undo(ring);
+                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       return -ETIME;
+               }
+
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
index 0debfd9f428c1b236c468277002135bc2a6e7e65..b10c95cad9a2edfd5ebb3bdecdc031c5bfe8f685 100644 (file)
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
 
        ring = &adev->jpeg.inst->ring_dec;
        sprintf(ring->name, "jpeg_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+                            0, AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
index 6173951db7b4527875a6cb53b043a124c1ae8227..e67d09cb1b03ecc06ad1b4ae457c910317aa2244 100644 (file)
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+                            0, AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle)
 static int jpeg_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
 
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index c04c2078a7c1f3655762e871c9147db0bafe8b64..37df3f2e587af51f98b3ba99552bcc7a82842558 100644 (file)
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
                sprintf(ring->name, "jpeg_dec_%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+                                    0, AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
old mode 100644 (file)
new mode 100755 (executable)
index 396c2a6..bd2d2b8
@@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
    { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
 };
 
-static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+       const struct soc15_reg_entry *reg,
        uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
 {
        uint32_t i;
@@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v1_0_ras_fields[i].sec_count_mask) >>
                                mmhub_v1_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+                       dev_info(adev->dev,
+                               "MMHUB SubBlock %s, SEC %d\n",
                                mmhub_v1_0_ras_fields[i].name,
                                sec_cnt);
                        *sec_count += sec_cnt;
@@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v1_0_ras_fields[i].ded_count_mask) >>
                                mmhub_v1_0_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+                       dev_info(adev->dev,
+                               "MMHUB SubBlock %s, DED %d\n",
                                mmhub_v1_0_ras_fields[i].name,
                                ded_cnt);
                        *ded_count += ded_cnt;
@@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
                reg_value =
                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
                if (reg_value)
-                       mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+                       mmhub_v1_0_get_ras_error_count(adev,
+                               &mmhub_v1_0_edc_cnt_regs[i],
                                reg_value, &sec_count, &ded_count);
        }
 
index 37dbe0f2142f54fc7730b212710b4dfea0a9bec3..83b453f5d7176616df5f78346422a2c8197e2bfc 100644 (file)
@@ -26,7 +26,7 @@
 
 #define AI_MAILBOX_POLL_ACK_TIMEDOUT   500
 #define AI_MAILBOX_POLL_MSG_TIMEDOUT   12000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT   500
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT   5000
 
 enum idh_request {
        IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -46,7 +46,8 @@ enum idh_event {
        IDH_SUCCESS,
        IDH_FAIL,
        IDH_QUERY_ALIVE,
-       IDH_EVENT_MAX
+
+       IDH_TEXT_MESSAGE = 255,
 };
 
 extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
index 237fa5e16b7c9c08282636400317a03321a961d3..ce2bf1fb79ed12a2c7a11cb30eb5c8c9f49eebe0 100644 (file)
@@ -30,7 +30,6 @@
 #include "navi10_ih.h"
 #include "soc15_common.h"
 #include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
 
 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
 {
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
  */
 static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
 {
-       return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                               mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+       return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
 }
 
 
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
 {
        u32 reg;
 
-       reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                            mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+       reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;
 
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
                timeout -= 10;
        } while (timeout > 1);
 
-       pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
 
        return -ETIME;
 }
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
              enum idh_request req, u32 data1, u32 data2, u32 data3)
 {
-       u32 reg;
        int r;
        uint8_t trn;
 
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
                }
        } while (trn);
 
-       reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                            mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
-       reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
-                           MSGBUF_DATA, req);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
-                     reg);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
-                               data1);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
-                               data2);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
-                               data3);
-
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);
 
        /* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
 {
        int r;
+       enum idh_event event = -1;
 
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
-       /* start to check msg if request is idh_req_gpu_init_access */
-       if (req == IDH_REQ_GPU_INIT_ACCESS ||
-               req == IDH_REQ_GPU_FINI_ACCESS ||
-               req == IDH_REQ_GPU_RESET_ACCESS) {
-               r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+       switch (req) {
+       case IDH_REQ_GPU_INIT_ACCESS:
+       case IDH_REQ_GPU_FINI_ACCESS:
+       case IDH_REQ_GPU_RESET_ACCESS:
+               event = IDH_READY_TO_ACCESS_GPU;
+               break;
+       case IDH_REQ_GPU_INIT_DATA:
+               event = IDH_REQ_GPU_INIT_DATA_READY;
+               break;
+       default:
+               break;
+       }
+
+       if (event != -1) {
+               r = xgpu_nv_poll_msg(adev, event);
                if (r) {
-                       pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
-                       return r;
+                       if (req != IDH_REQ_GPU_INIT_DATA) {
+                               pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+                               return r;
+                       }
+                       else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+                               adev->virt.req_init_data_ver = 0;
+               } else {
+                       if (req == IDH_REQ_GPU_INIT_DATA)
+                       {
+                               adev->virt.req_init_data_ver =
+                                       RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+                               /* assume V1 in case host doesn't set version number */
+                               if (adev->virt.req_init_data_ver < 1)
+                                       adev->virt.req_init_data_ver = 1;
+                       }
                }
+
                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
-                               RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                       mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+                               RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }
 
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
        return r;
 }
 
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+       return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
 {
-       u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+       u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+       if (state == AMDGPU_IRQ_STATE_ENABLE)
+               tmp |= 2;
+       else
+               tmp &= ~2;
 
-       tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
-                               (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+       WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 
        return 0;
 }
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
 {
-       u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+       u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+       if (state == AMDGPU_IRQ_STATE_ENABLE)
+               tmp |= 1;
+       else
+               tmp &= ~1;
 
-       tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
-                           (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+       WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 
        return 0;
 }
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
+       .req_init_data  = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .wait_reset = NULL,
        .trans_msg = xgpu_nv_mailbox_trans_msg,
index 99b15f6865cb8fb1c7d3fda771ba3fce1c5eec55..52605e14a1a530457f0a41b3577a29efc6ba56b1 100644 (file)
 #define __MXGPU_NV_H__
 
 #define NV_MAILBOX_POLL_ACK_TIMEDOUT   500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT   12000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT   500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT   6000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT   5000
+
+enum idh_request {
+       IDH_REQ_GPU_INIT_ACCESS = 1,
+       IDH_REL_GPU_INIT_ACCESS,
+       IDH_REQ_GPU_FINI_ACCESS,
+       IDH_REL_GPU_FINI_ACCESS,
+       IDH_REQ_GPU_RESET_ACCESS,
+       IDH_REQ_GPU_INIT_DATA,
+
+       IDH_LOG_VF_ERROR       = 200,
+};
+
+enum idh_event {
+       IDH_CLR_MSG_BUF = 0,
+       IDH_READY_TO_ACCESS_GPU,
+       IDH_FLR_NOTIFICATION,
+       IDH_FLR_NOTIFICATION_CMPL,
+       IDH_SUCCESS,
+       IDH_FAIL,
+       IDH_QUERY_ALIVE,
+       IDH_REQ_GPU_INIT_DATA_READY,
+
+       IDH_TEXT_MESSAGE = 255,
+};
 
 extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
 
@@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
 int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
 void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
 
-#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
-#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+#define mmMAILBOX_CONTROL 0xE5E
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)
+
+#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56
+#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57
+#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58
+#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59
+
+#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A
+#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B
+#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C
+#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D
+
+#define mmMAILBOX_INT_CNTL 0xE5F
 
 #endif
index f13dc6cc158f959778c71df70e291439782e5958..713ee66a4d3e20482ed8fee612d1fc988f0a8bcd 100644 (file)
@@ -43,7 +43,8 @@ enum idh_event {
        IDH_READY_TO_ACCESS_GPU,
        IDH_FLR_NOTIFICATION,
        IDH_FLR_NOTIFICATION_CMPL,
-       IDH_EVENT_MAX
+
+       IDH_TEXT_MESSAGE = 255
 };
 
 extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
index e08245a446fcecffa350f92fb939a1b5ed76570a..f97857ed3c7e046a6d8d56125178333f4e6fe435 100644 (file)
@@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
 
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+
        adev->irq.ih.enabled = true;
+
+       if (adev->irq.ih1.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+                                          RB_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               adev->irq.ih1.enabled = true;
+       }
+
+       if (adev->irq.ih2.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+                                          RB_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               adev->irq.ih2.enabled = true;
+       }
 }
 
 /**
@@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
 
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+
        /* set rptr, wptr to 0 */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
        adev->irq.ih.enabled = false;
        adev->irq.ih.rptr = 0;
+
+       if (adev->irq.ih1.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+                                          RB_ENABLE, 0);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+               adev->irq.ih1.enabled = false;
+               adev->irq.ih1.rptr = 0;
+       }
+
+       if (adev->irq.ih2.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+                                          RB_ENABLE, 0);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+               adev->irq.ih2.enabled = false;
+               adev->irq.ih2.rptr = 0;
+       }
+
 }
 
 static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
        return ih_rb_cntl;
 }
 
+static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+       u32 ih_doorbell_rtpr = 0;
+
+       if (ih->use_doorbell) {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR, OFFSET,
+                                                ih->doorbell_index);
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR,
+                                                ENABLE, 1);
+       } else {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR,
+                                                ENABLE, 0);
+       }
+       return ih_doorbell_rtpr;
+}
+
+static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+{
+       uint32_t tmp;
+
+       /* Reroute to IH ring 1 for VMC */
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+       /* Reroute IH ring 1 for UMC */
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+}
+
 /**
  * navi10_ih_irq_init - init and enable the interrupt ring
  *
@@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
 static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ih_ring *ih = &adev->irq.ih;
-       u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
+       u32 ih_rb_cntl, ih_chicken;
        u32 tmp;
 
        /* disable irqs */
@@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
                                   !!adev->irq.msi_enabled);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return -ETIMEDOUT;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+       navi10_ih_reroute_ih(adev);
 
        if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
                if (ih->use_bus_addr) {
@@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-
        /* set the writeback address whether it's enabled or not */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
                     lower_32_bits(ih->wptr_addr));
@@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
 
-       ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
-       if (ih->use_doorbell) {
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, OFFSET,
-                                                ih->doorbell_index);
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, ENABLE, 1);
-       } else {
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, ENABLE, 0);
-       }
-       WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+       WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+                       navi10_ih_doorbell_rptr(ih));
 
        adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
                                            ih->doorbell_index);
 
+       ih = &adev->irq.ih1;
+       if (ih->ring_size) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+                            (ih->gpu_addr >> 40) & 0xff);
+
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+                                          WPTR_OVERFLOW_ENABLE, 0);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+                                          RB_FULL_DRAIN_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return -ETIMEDOUT;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+               WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+                               navi10_ih_doorbell_rptr(ih));
+       }
+
+       ih = &adev->irq.ih2;
+       if (ih->ring_size) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+                            (ih->gpu_addr >> 40) & 0xff);
+
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return -ETIMEDOUT;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+               WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+                            navi10_ih_doorbell_rptr(ih));
+       }
+
+
        tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
        tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
                            CLIENT18_IS_STORM_CLIENT, 1);
@@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
                goto out;
 
-       reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+       if (ih == &adev->irq.ih)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+       else if (ih == &adev->irq.ih1)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+       else if (ih == &adev->irq.ih2)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+       else
+               BUG();
+
        wptr = RREG32_NO_KIQ(reg);
        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
                goto out;
@@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
                 wptr, ih->rptr, tmp);
        ih->rptr = tmp;
 
-       reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+       if (ih == &adev->irq.ih)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+       else if (ih == &adev->irq.ih1)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+       else if (ih == &adev->irq.ih2)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+       else
+               BUG();
+
        tmp = RREG32_NO_KIQ(reg);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(reg, tmp);
@@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 
                if (amdgpu_sriov_vf(adev))
                        navi10_ih_irq_rearm(adev, ih);
-       } else
+       } else if (ih == &adev->irq.ih) {
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+       } else if (ih == &adev->irq.ih1) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+       } else if (ih == &adev->irq.ih2) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+       }
+}
+
+/**
+ * navi10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int navi10_ih_self_irq(struct amdgpu_device *adev,
+                             struct amdgpu_irq_src *source,
+                             struct amdgpu_iv_entry *entry)
+{
+       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+       switch (entry->ring_id) {
+       case 1:
+               *adev->irq.ih1.wptr_cpu = wptr;
+               schedule_work(&adev->irq.ih1_work);
+               break;
+       case 2:
+               *adev->irq.ih2.wptr_cpu = wptr;
+               schedule_work(&adev->irq.ih2_work);
+               break;
+       default: break;
+       }
+       return 0;
+}
+
+static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
+       .process = navi10_ih_self_irq,
+};
+
+static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->irq.self_irq.num_types = 0;
+       adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
 }
 
 static int navi10_ih_early_init(void *handle)
@@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        navi10_ih_set_interrupt_funcs(adev);
+       navi10_ih_set_self_irq_funcs(adev);
        return 0;
 }
 
@@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool use_bus_addr;
 
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+                               &adev->irq.self_irq);
+
+       if (r)
+               return r;
+
        /* use gpu virtual address for ih ring
         * until ih_checken is programmed to allow
         * use bus address for ih ring by psp bl */
@@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle)
        adev->irq.ih.use_doorbell = true;
        adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+       if (r)
+               return r;
+
+       adev->irq.ih1.use_doorbell = true;
+       adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+       if (r)
+               return r;
+
+       adev->irq.ih2.use_doorbell = true;
+       adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
        r = amdgpu_irq_init(adev);
 
        return r;
@@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+       amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih);
 
        return 0;
index 074a9a09c0a79e80b29296e22843ff962ae04d33..a5b60c9a24189d314f00b7ee1c09d8896ae6f2aa 100644 (file)
 #define SDMA_OP_AQL_COPY  0
 #define SDMA_OP_AQL_BARRIER_OR  0
 
+#define SDMA_GCR_RANGE_IS_PA           (1 << 18)
+#define SDMA_GCR_SEQ(x)                        (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB                        (1 << 15)
+#define SDMA_GCR_GL2_INV               (1 << 14)
+#define SDMA_GCR_GL2_DISCARD           (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x)          (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US                        (1 << 10)
+#define SDMA_GCR_GL1_INV               (1 << 9)
+#define SDMA_GCR_GLV_INV               (1 << 8)
+#define SDMA_GCR_GLK_INV               (1 << 7)
+#define SDMA_GCR_GLK_WB                        (1 << 6)
+#define SDMA_GCR_GLM_INV               (1 << 5)
+#define SDMA_GCR_GLM_WB                        (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x)          (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x)            (((x) & 0x3) << 0)
+
 /*define for op field*/
 #define SDMA_PKT_HEADER_op_offset 0
 #define SDMA_PKT_HEADER_op_mask   0x000000FF
index f3a3fe746222f39ff681212a40a10feb3777facb..cbcf04578b999b968f49efee40a4e44fad4f59e0 100644 (file)
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
        .get_clockgating_state = nbio_v2_3_get_clockgating_state,
        .ih_control = nbio_v2_3_ih_control,
        .init_registers = nbio_v2_3_init_registers,
-       .detect_hw_virt = nbio_v2_3_detect_hw_virt,
        .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 };
index 635d9e1fc0a364db991317bec42e6d6edccc29b1..7b2fb050407d2fe785bfa389a92497e2bdbdbedf 100644 (file)
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
 
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
-       .detect_hw_virt = nbio_v6_1_detect_hw_virt,
 };
index d6cbf26074bca475d915d1ac9f1d6b6c13665130..d34628e113fc389bb1809a0905aad09e12d37d46 100644 (file)
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
 
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
        .get_clockgating_state = nbio_v7_0_get_clockgating_state,
        .ih_control = nbio_v7_0_ih_control,
        .init_registers = nbio_v7_0_init_registers,
-       .detect_hw_virt = nbio_v7_0_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
 };
index 149d386590df2beb6b7f034ec46061e5b6c58be8..e629156173d31b46b6e2bada2f5ed2a19710a06a 100644 (file)
@@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
 
        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
-               ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+               ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
        } else
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
 
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
 };
 
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
 
@@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
                obj->err_data.ce_count += err_data.ce_count;
 
                if (err_data.ce_count)
-                       DRM_INFO("%ld correctable errors detected in %s block\n",
-                               obj->err_data.ce_count, adev->nbio.ras_if->name);
+                       dev_info(adev->dev, "%ld correctable hardware "
+                                       "errors detected in %s block, "
+                                       "no user action is needed.\n",
+                                       obj->err_data.ce_count,
+                                       adev->nbio.ras_if->name);
 
                if (err_data.ue_count)
-                       DRM_INFO("%ld uncorrectable errors detected in %s block\n",
-                               obj->err_data.ue_count, adev->nbio.ras_if->name);
+                       dev_info(adev->dev, "%ld uncorrectable hardware "
+                                       "errors detected in %s block\n",
+                                       obj->err_data.ue_count,
+                                       adev->nbio.ras_if->name);
 
-               DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+               dev_info(adev->dev, "RAS controller interrupt triggered "
+                                       "by NBIF error\n");
 
                /* ras_controller_int is dedicated for nbif ras error,
                 * not the global interrupt for sync flood
@@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
-       .detect_hw_virt = nbio_v7_4_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
        .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
        .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
index 033cbbca2072d0eb430738dccbafddb5c718d119..9c42316c47c08c5cd21eb1b9612279443362d8cd 100644 (file)
@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
        struct smu_context *smu = &adev->smu;
 
        if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-               if (!adev->in_suspend)
-                       amdgpu_inc_vram_lost(adev);
                ret = smu_baco_enter(smu);
                if (ret)
                        return ret;
@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
                if (ret)
                        return ret;
        } else {
-               if (!adev->in_suspend)
-                       amdgpu_inc_vram_lost(adev);
                ret = nv_asic_mode1_reset(adev);
        }
 
@@ -457,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 {
        int r;
 
-       /* Set IP register base before any HW register access */
-       r = nv_reg_base_init(adev);
-       if (r)
-               return r;
-
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio.funcs->detect_hw_virt(adev);
-
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
                adev->virt.ops = &xgpu_nv_virt_ops;
+               /* try send GPU_INIT_DATA request to host */
+               amdgpu_virt_request_init_data(adev);
+       }
+
+       /* Set IP register base before any HW register access */
+       r = nv_reg_base_init(adev);
+       if (r)
+               return r;
 
        switch (adev->asic_type) {
        case CHIP_NAVI10:
@@ -501,8 +498,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-               if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-                   !amdgpu_sriov_vf(adev))
+               if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
index 1de984647dbbfcc790ae762cc35768332d90e849..fd6b58243b03279b52d5f042e39d2727370924e9 100644 (file)
 #define        PACKET3_BLK_CNTX_UPDATE                         0x53
 #define        PACKET3_INCR_UPDT_STATE                         0x55
 #define        PACKET3_ACQUIRE_MEM                             0x58
+/* 1.  HEADER
+ * 2.  COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 2.  COHER_SIZE [31:0]
+ * 3.  COHER_SIZE_HI [7:0]
+ * 4.  COHER_BASE_LO [31:0]
+ * 5.  COHER_BASE_HI [23:0]
+ * 7.  POLL_INTERVAL [15:0]
+ * 8.  GCR_CNTL [18:0]
+ */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0)
+               /*
+                * 0:NOP
+                * 1:ALL
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2)
+               /*
+                * 0:ALL
+                * 1:reserved
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11)
+               /*
+                * 0:ALL
+                * 1:VOL
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x)  ((x) << 13)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16)
+               /*
+                * 0: PARALLEL
+                * 1: FORWARD
+                * 2: REVERSE
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA  (1 << 18)
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_INTERRUPT                               0x5A
 #define        PACKET3_GEN_PDEPTE                              0x5B
 #define        PACKET3_GET_LOD_STATS                           0x8E
 #define        PACKET3_DRAW_MULTI_PREAMBLE                     0x8F
 #define        PACKET3_FRAME_CONTROL                           0x90
+#                      define FRAME_TMZ        (1 << 0)
 #                      define FRAME_CMD(x) ((x) << 28)
                        /*
                         * x=0: tmz_begin
index 7539104175e8ae451520e618c4886b67f5d35e37..90727cfb4447fe30a53afcc6ce2131ef61bf98d9 100644 (file)
@@ -50,7 +50,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
        const char *chip_name;
        char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *hdr;
        const struct ta_firmware_header_v1_0 *ta_hdr;
        DRM_DEBUG("\n");
 
@@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
                goto out;
 
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
        err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
        if (err) {
@@ -126,8 +113,6 @@ out:
                dev_err(adev->dev,
                        "psp v10.0: Failed to load firmware \"%s\"\n",
                        fw_name);
-               release_firmware(adev->psp.asd_fw);
-               adev->psp.asd_fw = NULL;
        }
 
        return err;
@@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v10_0_sram_map(struct amdgpu_device *adev,
-                  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                  unsigned int *sram_data_reg_offset,
-                  enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
-                                       struct amdgpu_firmware_info *ucode,
-                                       enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (!ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
-
 static int psp_v10_0_mode1_reset(struct psp_context *psp)
 {
        DRM_INFO("psp mode 1 reset not supported now! \n");
@@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = {
        .ring_create = psp_v10_0_ring_create,
        .ring_stop = psp_v10_0_ring_stop,
        .ring_destroy = psp_v10_0_ring_destroy,
-       .compare_sram_data = psp_v10_0_compare_sram_data,
        .mode1_reset = psp_v10_0_mode1_reset,
        .ring_get_wptr = psp_v10_0_ring_get_wptr,
        .ring_set_wptr = psp_v10_0_ring_set_wptr,
index 0afd610a1263faeb276de8ca284eeb4f1308e497..1de89cc3c3559e4f80c360dade988c61933e031d 100644 (file)
@@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        const char *chip_name;
        char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *sos_hdr;
-       const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
-       const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
-       const struct psp_firmware_header_v1_0 *asd_hdr;
        const struct ta_firmware_header_v1_0 *ta_hdr;
 
        DRM_DEBUG("\n");
@@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
-       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+       err = psp_init_sos_microcode(psp, chip_name);
        if (err)
-               goto out;
+               return err;
 
-       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-       amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
-
-       switch (sos_hdr->header.header_version_major) {
-       case 1:
-               adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
-               adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
-               adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
-               adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
-               adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
-                               le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
-               adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                               le32_to_cpu(sos_hdr->sos_offset_bytes);
-               if (sos_hdr->header.header_version_minor == 1) {
-                       sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
-                       adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
-                       adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                       le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
-                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
-                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                       le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
-               }
-               if (sos_hdr->header.header_version_minor == 2) {
-                       sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
-                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
-                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                                   le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
-               }
-               break;
-       default:
-               dev_err(adev->dev,
-                       "Unsupported psp sos firmware\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out1;
-
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out1;
-
-       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
-                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+               return err;
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
@@ -194,6 +137,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
+               if (amdgpu_sriov_vf(adev))
+                       break;
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
                err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
                if (err) {
@@ -229,15 +174,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 out2:
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;
-out1:
-       release_firmware(adev->psp.asd_fw);
-       adev->psp.asd_fw = NULL;
-out:
-       dev_err(adev->dev,
-               "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
-       release_firmware(adev->psp.sos_fw);
-       adev->psp.sos_fw = NULL;
-
        return err;
 }
 
@@ -283,11 +219,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
        /* Check tOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
-       if (psp_v11_0_is_sos_alive(psp)) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (psp_v11_0_is_sos_alive(psp))
                return 0;
-       }
 
        ret = psp_v11_0_wait_for_bootloader(psp);
        if (ret)
@@ -319,11 +252,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
        /* Check sOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
-       if (psp_v11_0_is_sos_alive(psp)) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (psp_v11_0_is_sos_alive(psp))
                return 0;
-       }
 
        ret = psp_v11_0_wait_for_bootloader(psp);
        if (ret)
@@ -446,13 +376,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
-               return true;
-       return false;
-}
-
 static int psp_v11_0_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type)
 {
@@ -460,7 +383,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
        struct amdgpu_device *adev = psp->adev;
 
        /* Write the ring destroy command*/
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                                     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
        else
@@ -471,7 +394,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
        mdelay(20);
 
        /* Wait for response flag (bit 31) */
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
                                   0x80000000, 0x80000000, false);
        else
@@ -489,7 +412,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
        struct psp_ring *ring = &psp->km_ring;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                ret = psp_v11_0_ring_stop(psp, ring_type);
                if (ret) {
                        DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
@@ -567,138 +490,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v11_0_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               if (adev->asic_type < CHIP_NAVI10) {
-                       *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-                       *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               } else {
-                       *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
-                       *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
-               }
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               if (adev->asic_type < CHIP_NAVI10) {
-                       *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-                       *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               } else {
-                       *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
-                       *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
-               }
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static int psp_v11_0_mode1_reset(struct psp_context *psp)
 {
        int ret;
@@ -733,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
        return 0;
 }
 
-/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready.
- * For now, return success and hack the hive_id so high level code can
- * start testing
- */
-static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
-       int number_devices, struct psp_xgmi_topology_info *topology)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
-       struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
-       int i;
-       int ret;
-
-       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
-               return -EINVAL;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       /* Fill in the shared memory with topology information as input */
-       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
-       topology_info_input->num_nodes = number_devices;
-
-       for (i = 0; i < topology_info_input->num_nodes; i++) {
-               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
-               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
-               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
-               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
-       }
-
-       /* Invoke xgmi ta to get the topology information */
-       ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
-       if (ret)
-               return ret;
-
-       /* Read the output topology information from the shared memory */
-       topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
-       topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
-       for (i = 0; i < topology->num_nodes; i++) {
-               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
-               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
-               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
-               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
-       }
-
-       return 0;
-}
-
-static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
-       int number_devices, struct psp_xgmi_topology_info *topology)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
-       int i;
-
-       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
-               return -EINVAL;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
-       topology_info_input->num_nodes = number_devices;
-
-       for (i = 0; i < topology_info_input->num_nodes; i++) {
-               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
-               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
-               topology_info_input->nodes[i].is_sharing_enabled = 1;
-               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
-       }
-
-       /* Invoke xgmi ta to set topology information */
-       return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
-}
-
-static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       int ret;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
-
-       /* Invoke xgmi ta to get hive id */
-       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
-       if (ret)
-               return ret;
-
-       *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
-
-       return 0;
-}
-
-static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       int ret;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
-
-       /* Invoke xgmi ta to get the node id */
-       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
-       if (ret)
-               return ret;
-
-       *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
-
-       return 0;
-}
-
-static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
-               struct ta_ras_trigger_error_input *info)
-{
-       struct ta_ras_shared_memory *ras_cmd;
-       int ret;
-
-       if (!psp->ras.ras_initialized)
-               return -EINVAL;
-
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
-       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-       ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
-       ras_cmd->ras_in_message.trigger_error = *info;
-
-       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
-       if (ret)
-               return -EINVAL;
-
-       /* If err_event_athub occurs error inject was successful, however
-          return status from TA is no long reliable */
-       if (amdgpu_ras_intr_triggered())
-               return 0;
-
-       return ras_cmd->ras_status;
-}
-
-static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
-{
-#if 0
-       // not support yet.
-       struct ta_ras_shared_memory *ras_cmd;
-       int ret;
-
-       if (!psp->ras.ras_initialized)
-               return -EINVAL;
-
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
-       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-       ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
-       ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
-
-       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
-       if (ret)
-               return -EINVAL;
-
-       return ras_cmd->ras_status;
-#else
-       return -EINVAL;
-#endif
-}
-
-static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
-{
-       return psp_rlc_autoload_start(psp);
-}
-
 static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
 {
        int ret;
@@ -1099,7 +715,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -1111,7 +727,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
        } else
@@ -1203,16 +819,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
        .ring_create = psp_v11_0_ring_create,
        .ring_stop = psp_v11_0_ring_stop,
        .ring_destroy = psp_v11_0_ring_destroy,
-       .compare_sram_data = psp_v11_0_compare_sram_data,
        .mode1_reset = psp_v11_0_mode1_reset,
-       .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
-       .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
-       .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
-       .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
-       .support_vmr_ring = psp_v11_0_support_vmr_ring,
-       .ras_trigger_error = psp_v11_0_ras_trigger_error,
-       .ras_cure_posion = psp_v11_0_ras_cure_posion,
-       .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
        .mem_training_init = psp_v11_0_memory_training_init,
        .mem_training_fini = psp_v11_0_memory_training_fini,
        .mem_training = psp_v11_0_memory_training,
index 58d8b6d732e8f0b6e26fc52738dad013813c03e7..6c9614f77d33e5ffae5004d6e20bb8cc24a36464 100644 (file)
@@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
        const char *chip_name;
-       char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *asd_hdr;
-
-       DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
        case CHIP_RENOIR:
@@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
                BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out1;
-
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out1;
-
-       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
-                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
-
-       return 0;
-
-out1:
-       release_firmware(adev->psp.asd_fw);
-       adev->psp.asd_fw = NULL;
-
+       err = psp_init_asd_microcode(psp, chip_name);
        return err;
 }
 
@@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
         * are already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-       if (sol_reg) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (sol_reg)
                return 0;
-       }
 
        /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
-               return true;
-       return false;
-}
-
 static int psp_v12_0_ring_create(struct psp_context *psp,
                                enum psp_ring_type ring_type)
 {
@@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
        struct psp_ring *ring = &psp->km_ring;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(psp->adev)) {
                /* Write low address of the ring to C2PMSG_102 */
                psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
        struct amdgpu_device *adev = psp->adev;
 
        /* Write the ring destroy command*/
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                                     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
        else
@@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
        mdelay(20);
 
        /* Wait for response flag (bit 31) */
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
                                   0x80000000, 0x80000000, false);
        else
@@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v12_0_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static int psp_v12_0_mode1_reset(struct psp_context *psp)
 {
        int ret;
@@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
        } else
@@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
        .ring_create = psp_v12_0_ring_create,
        .ring_stop = psp_v12_0_ring_stop,
        .ring_destroy = psp_v12_0_ring_destroy,
-       .compare_sram_data = psp_v12_0_compare_sram_data,
        .mode1_reset = psp_v12_0_mode1_reset,
        .ring_get_wptr = psp_v12_0_ring_get_wptr,
        .ring_set_wptr = psp_v12_0_ring_set_wptr,
index 735c43c7daab921e283f8bcf7aedcde5c8cae386..f2e725f72d2f1644e1a786084df6782c4fe9aad3 100644 (file)
@@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
 
 #define smnMP1_FIRMWARE_FLAGS 0x3010028
 
-static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
 static int psp_v3_1_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type);
 
@@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
        const char *chip_name;
-       char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
-       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-
-       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       err = psp_init_sos_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-       adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
-                                       le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-       adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                               le32_to_cpu(hdr->sos_offset_bytes);
-
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out;
+               return err;
 
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+               return err;
 
        return 0;
-out:
-       if (err) {
-               dev_err(adev->dev,
-                       "psp v3.1: Failed to load firmware \"%s\"\n",
-                       fw_name);
-               release_firmware(adev->psp.sos_fw);
-               adev->psp.sos_fw = NULL;
-               release_firmware(adev->psp.asd_fw);
-               adev->psp.asd_fw = NULL;
-       }
-
-       return err;
 }
 
 static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
@@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
        return ret;
 }
 
-static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
-{
-       int i;
-
-       if (ver == adev->psp.sos_fw_version)
-               return true;
-
-       /*
-        * Double check if the latest four legacy versions.
-        * If yes, it is still the right version.
-        */
-       for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
-               if (sos_old_versions[i] == adev->psp.sos_fw_version)
-                       return true;
-       }
-
-       return false;
-}
-
 static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 {
        int ret;
        unsigned int psp_gfxdrv_command_reg = 0;
        struct amdgpu_device *adev = psp->adev;
-       uint32_t sol_reg, ver;
+       uint32_t sol_reg;
 
        /* Check sOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-       if (sol_reg) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (sol_reg)
                return 0;
-       }
 
        /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
                           RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
                           0, true);
-
-       ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-       if (!psp_v3_1_match_version(adev, ver))
-               DRM_WARN("SOS version doesn't match\n");
-
        return ret;
 }
 
@@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
 
        psp_v3_1_reroute_ih(psp);
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                ret = psp_v3_1_ring_stop(psp, ring_type);
                if (ret) {
                        DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type)
 {
        int ret = 0;
-       unsigned int psp_ring_reg = 0;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
-               /* Write the Destroy GPCOM ring command to C2PMSG_101 */
-               psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
-               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
-
-               /* there might be handshake issue which needs delay */
-               mdelay(20);
-
-               /* Wait for response flag (bit 31) in C2PMSG_101 */
-               ret = psp_wait_for(psp,
-                               SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
-                               0x80000000, 0x80000000, false);
-       } else {
-               /* Write the ring destroy command to C2PMSG_64 */
-               psp_ring_reg = 3 << 16;
-               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+       /* Write the ring destroy command*/
+       if (amdgpu_sriov_vf(adev))
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+                                    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+       else
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+                                    GFX_CTRL_CMD_ID_DESTROY_RINGS);
 
-               /* there might be handshake issue which needs delay */
-               mdelay(20);
+       /* there might be handshake issue with hardware which needs delay */
+       mdelay(20);
 
-               /* Wait for response flag (bit 31) in C2PMSG_64 */
-               ret = psp_wait_for(psp,
-                               SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-                               0x80000000, 0x80000000, false);
-       }
+       /* Wait for response flag (bit 31) */
+       if (amdgpu_sriov_vf(adev))
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+                                  0x80000000, 0x80000000, false);
+       else
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+                                  0x80000000, 0x80000000, false);
 
        return ret;
 }
@@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v3_1_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
@@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
        return 0;
 }
 
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev))
-               return true;
-
-       return false;
-}
-
 static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
 {
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
@@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = {
        .ring_create = psp_v3_1_ring_create,
        .ring_stop = psp_v3_1_ring_stop,
        .ring_destroy = psp_v3_1_ring_destroy,
-       .compare_sram_data = psp_v3_1_compare_sram_data,
        .smu_reload_quirk = psp_v3_1_smu_reload_quirk,
        .mode1_reset = psp_v3_1_mode1_reset,
-       .support_vmr_ring = psp_v3_1_support_vmr_ring,
        .ring_get_wptr = psp_v3_1_ring_get_wptr,
        .ring_set_wptr = psp_v3_1_ring_set_wptr,
 };
index 7d509a40076fa021f3724a4258fa779afe005a64..5f304d61999ebf9c3257ebfcbd48e46a4d26b2a5 100644 (file)
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1200,7 +1200,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
index b6109a99fc43c4e35d27de930b93756d5c612d8e..c59f6f6f4c0917c161e3214ca8428f4440bfd95b 100644 (file)
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1638,7 +1638,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
index 5f3a5ee2a3f4e20948678c36e733e8d8875d8752..ebd723a0bcfced12ea9d5e9beb2ecd744e6eb775 100644 (file)
@@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
-       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-               sdma[i]->sched.ready = false;
        }
 }
 
@@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-               sdma[i]->sched.ready = false;
        }
 }
 
@@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
 
-               DRM_INFO("use_doorbell being set to: [%s]\n",
+               DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                                ring->use_doorbell?"true":"false");
 
                /* doorbell size is 2 dwords, get DWORD offset */
@@ -1848,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
 
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
@@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
                        sprintf(ring->name, "page%d", i);
                        r = amdgpu_ring_init(adev, ring, 1024,
                                             &adev->sdma.trap_irq,
-                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -2445,10 +2458,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+               SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
index ebfd2cdf4e651215bc5ee0bf6d54be8ec9f46762..b544baf306f27b3a52920381bd1301afe56ba318 100644 (file)
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
                                                (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
                break;
        case CHIP_NAVI12:
-               soc15_program_register_sequence(adev,
-                                               golden_settings_sdma_5,
-                                               (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+               if (amdgpu_sriov_vf(adev))
+                       soc15_program_register_sequence(adev,
+                                                       golden_settings_sdma_5_sriov,
+                                                       (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+               else
+                       soc15_program_register_sequence(adev,
+                                                       golden_settings_sdma_5,
+                                                       (const u32)ARRAY_SIZE(golden_settings_sdma_5));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_nv12,
                                                (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -382,6 +410,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
+       /* Invalidate L2, because if we don't do it, we might get stale cache
+        * lines from previous IBs.
+        */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+                                SDMA_GCR_GL2_WB |
+                                SDMA_GCR_GLM_INV |
+                                SDMA_GCR_GLM_WB) << 16);
+       amdgpu_ring_write(ring, 0xffffff80);
+       amdgpu_ring_write(ring, 0xffff);
+
        /* An IB packet must end on a 8 DW boundary--the next dword
         * must be on a 8-dword boundary. Our IB packet below is 6
         * dwords long, thus add x number of NOPs, such that, in
@@ -502,9 +542,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
        }
-
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -529,7 +566,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-       u32 f32_cntl, phase_quantum = 0;
+       u32 f32_cntl = 0, phase_quantum = 0;
        int i;
 
        if (amdgpu_sdma_phase_quantum) {
@@ -557,9 +594,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
        }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-                               AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+               if (!amdgpu_sriov_vf(adev)) {
+                       f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+               }
+
                if (enable && amdgpu_sdma_phase_quantum) {
                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
                               phase_quantum);
@@ -568,7 +608,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
                               phase_quantum);
                }
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+               if (!amdgpu_sriov_vf(adev))
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
        }
 
 }
@@ -591,6 +632,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
                sdma_v5_0_rlc_stop(adev);
        }
 
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -623,7 +667,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+               if (!amdgpu_sriov_vf(adev))
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -699,26 +744,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                /* set minor_ptr_update to 0 after wptr programed */
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
-               /* set utc l1 enable flag always to 1 */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
-               /* enable MCBP */
-               temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
-               /* Set up RESP_MODE to non-copy addresses */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
-               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
-               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
-               /* program default cache read and write policy */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
-               /* clean read policy and write policy bits */
-               temp &= 0xFF0FFF;
-               temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+               if (!amdgpu_sriov_vf(adev)) {
+                       /* set utc l1 enable flag always to 1 */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+                       temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+                       /* enable MCBP */
+                       temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+                       /* Set up RESP_MODE to non-copy addresses */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+                       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+                       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+                       /* program default cache read and write policy */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+                       /* clean read policy and write policy bits */
+                       temp &= 0xFF0FFF;
+                       temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+               }
 
                if (!amdgpu_sriov_vf(adev)) {
                        /* unhalt engine */
@@ -948,7 +995,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
@@ -1224,7 +1272,7 @@ static int sdma_v5_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
 
-               DRM_INFO("use_doorbell being set to: [%s]\n",
+               DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                                ring->use_doorbell?"true":"false");
 
                ring->doorbell_index = (i == 0) ?
@@ -1236,7 +1284,8 @@ static int sdma_v5_0_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1387,14 +1436,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
 {
        u32 sdma_cntl;
 
-       u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
-               sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-               sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+       if (!amdgpu_sriov_vf(adev)) {
+               u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+                       sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+                       sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
 
-       sdma_cntl = RREG32(reg_offset);
-       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
-                      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-       WREG32(reg_offset, sdma_cntl);
+               sdma_cntl = RREG32(reg_offset);
+               sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+                                         state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+               WREG32(reg_offset, sdma_cntl);
+       }
 
        return 0;
 }
@@ -1595,7 +1646,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
                10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
-       .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+       .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
        .emit_ib = sdma_v5_0_ring_emit_ib,
        .emit_fence = sdma_v5_0_ring_emit_fence,
        .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
@@ -1655,10 +1706,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+               SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
index 4d415bfdb42ff2ac62e4431d90a64a9ee3692b2d..153db3f763bc15d4a012b413e835208b01af11b5 100644 (file)
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
        return 0;
 }
 
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-       si_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_VERDE:
        case CHIP_TAHITI:
index 42d5601b6bf35233f7fbea9cad6181da0ce6198f..7d2bbcbe547b2ad53c5b091c2bf09e055dfdf2e1 100644 (file)
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-               ring->sched.ready = false;
        }
 }
 
@@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -775,7 +776,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
 static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, byte_count);
index 0860e85a2d358be2707a69f37207f3d08f27a6ef..c00ba4b23c9a6fec651eb748f52947d5b63fe4ac 100644 (file)
@@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti =
        false
 };
 
-#if 0
-static const struct si_dte_data dte_data_tahiti_le =
-{
-       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
-       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
-       0x5,
-       0xAFC8,
-       0x64,
-       0x32,
-       1,
-       0,
-       0x10,
-       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
-       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
-       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
-       85,
-       true
-};
-#endif
-
 static const struct si_dte_data dte_data_tahiti_pro =
 {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
index a40499d51c93cd9a1fd3f262402f92c6ae592dc4..58a440a15525a932d9f4bfddb167fabee407903d 100644 (file)
@@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 
        switch (soc15_asic_reset_method(adev)) {
                case AMD_RESET_METHOD_BACO:
-                       if (!adev->in_suspend)
-                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_baco_reset(adev);
                case AMD_RESET_METHOD_MODE2:
                        return amdgpu_dpm_mode2_reset(adev);
                default:
-                       if (!adev->in_suspend)
-                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_mode1_reset(adev);
        }
 }
@@ -712,7 +708,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                adev->df.funcs = &df_v1_7_funcs;
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
@@ -1222,7 +1217,7 @@ static int soc15_common_early_init(void *handle)
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
-               adev->pg_flags = 0;
+               adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 0x32;
                break;
        case CHIP_RENOIR:
index c893c645a4b2d5a22159e17c4e302690c3008e6c..56d02aa690a70149427a96138256af36eacbd1b8 100644 (file)
@@ -35,6 +35,9 @@
 #define RREG32_SOC15(ip, inst, reg) \
        RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
+#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
+       RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
 #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
        RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
 
index edfe50821cd9bf5e042fc001eb7ca204660f9aed..799925d22fc81cfda76722859e044abef24388a7 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
+/* 1.  HEADER
+ * 2.  COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3.  COHER_SIZE [31:0]
+ * 4.  COHER_SIZE_HI [7:0]
+ * 5.  COHER_BASE_LO [31:0]
+ * 6.  COHER_BASE_HI [23:0]
+ * 7.  POLL_INTERVAL [15:0]
+ */
+/* COHER_CNTL fields for CP_COHER_CNTL */
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
 #define        PACKET3_WAIT_ON_DE_COUNTER_DIFF                 0x88
 #define        PACKET3_SWITCH_BUFFER                           0x8B
 #define PACKET3_FRAME_CONTROL                          0x90
+#                      define FRAME_TMZ        (1 << 0)
 #                      define FRAME_CMD(x) ((x) << 28)
                        /*
                         * x=0: tmz_begin
index ca7d05993ca2f5961afcbecd528bb0f54ba08979..745ed0fba1ed9e56acfe70ea66b3f258ef8eb2f0 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _TA_RAS_IF_H
 #define _TA_RAS_IF_H
 
+#define RAS_TA_HOST_IF_VER     0
+
 /* Responses have bit 31 set */
 #define RSP_ID_MASK (1U << 31)
 #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@@ -36,18 +38,24 @@ enum ras_command {
        TA_RAS_COMMAND__TRIGGER_ERROR,
 };
 
-enum ta_ras_status {
-       TA_RAS_STATUS__SUCCESS                          = 0x00,
-       TA_RAS_STATUS__RESET_NEEDED                     = 0x01,
-       TA_RAS_STATUS__ERROR_INVALID_PARAMETER          = 0x02,
-       TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE          = 0x03,
-       TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD          = 0x04,
-       TA_RAS_STATUS__ERROR_INJECTION_FAILED           = 0x05,
-       TA_RAS_STATUS__ERROR_ASD_READ_WRITE             = 0x06,
-       TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE           = 0x07,
-       TA_RAS_STATUS__ERROR_TIMEOUT                    = 0x08,
-       TA_RAS_STATUS__ERROR_BLOCK_DISABLED             = 0x09,
-       TA_RAS_STATUS__ERROR_GENERIC                    = 0x10,
+enum ta_ras_status
+{
+       TA_RAS_STATUS__SUCCESS                          = 0x00,
+       TA_RAS_STATUS__RESET_NEEDED                     = 0xA001,
+       TA_RAS_STATUS__ERROR_INVALID_PARAMETER          = 0xA002,
+       TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE          = 0xA003,
+       TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD          = 0xA004,
+       TA_RAS_STATUS__ERROR_INJECTION_FAILED           = 0xA005,
+       TA_RAS_STATUS__ERROR_ASD_READ_WRITE             = 0xA006,
+       TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE           = 0xA007,
+       TA_RAS_STATUS__ERROR_TIMEOUT                    = 0xA008,
+       TA_RAS_STATUS__ERROR_BLOCK_DISABLED             = 0XA009,
+       TA_RAS_STATUS__ERROR_GENERIC                    = 0xA00A,
+       TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT             = 0xA00B,
+       TA_RAS_STATUS__ERROR_GET_DEV_INFO               = 0xA00C,
+       TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV            = 0xA00D,
+       TA_RAS_STATUS__ERROR_NOT_INITIALIZED            = 0xA00E,
+       TA_RAS_STATUS__ERROR_TEE_INTERNAL               = 0xA00F
 };
 
 enum ta_ras_block {
@@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
        uint64_t                value;                  // method if error injection. i.e persistent, coherent etc.
 };
 
+struct ta_ras_output_flags
+{
+       uint8_t    ras_init_success_flag;
+       uint8_t    err_inject_switch_disable_flag;
+       uint8_t    reg_access_failure_flag;
+};
+
 /* Common input structure for RAS callbacks */
 /**********************************************************/
 union ta_ras_cmd_input {
        struct ta_ras_enable_features_input     enable_features;
        struct ta_ras_disable_features_input    disable_features;
        struct ta_ras_trigger_error_input       trigger_error;
+
+       uint32_t        reserve_pad[256];
+};
+
+union ta_ras_cmd_output
+{
+       struct ta_ras_output_flags  flags;
+
+       uint32_t        reserve_pad[256];
 };
 
 /* Shared Memory structures */
 /**********************************************************/
 struct ta_ras_shared_memory {
-       uint32_t                cmd_id;
-       uint32_t                resp_id;
-       enum ta_ras_status      ras_status;
-       uint32_t                reserved;
-       union ta_ras_cmd_input  ras_in_message;
+       uint32_t                    cmd_id;
+       uint32_t                    resp_id;
+       uint32_t                    ras_status;
+       uint32_t                    if_version;
+       union ta_ras_cmd_input      ras_in_message;
+       union ta_ras_cmd_output     ras_out_message;
 };
 
 #endif // TL_RAS_IF_H_
index 14d346321a5f7bfe78c0b4a424cc3113e23e11cb..418cf097c918a58a066d986bdca6d5b82519d3a1 100644 (file)
@@ -56,24 +56,43 @@ const uint32_t
 
 static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
 {
-       WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+                       mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+       rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+                       RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN, 1);
+
+       WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 {
-       WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+                       mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+       rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+                       RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN, 0);
+
+       WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
 {
-       uint32_t rsmu_umc_index;
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
 
-       rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
                        mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
 
-       return REG_GET_FIELD(rsmu_umc_index,
+       return REG_GET_FIELD(rsmu_umc_val,
                        RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN);
 }
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
        return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
 }
 
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+                                       uint32_t umc_reg_offset)
+{
+       uint32_t ecc_err_cnt_addr;
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+       if (adev->asic_type == CHIP_ARCTURUS) {
+               /* UMC 6_1_2 registers */
+               ecc_err_cnt_sel_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCntSel_ARCT);
+               ecc_err_cnt_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCnt_ARCT);
+       } else {
+               /* UMC 6_1_1 registers */
+               ecc_err_cnt_sel_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCntSel);
+               ecc_err_cnt_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCnt);
+       }
+
+       /* select the lower chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear lower chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_1_CE_CNT_INIT);
+
+       /* select the higher chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear higher chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+       uint32_t rsmu_umc_index_state =
+                               umc_v6_1_get_umc_index_mode_state(adev);
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_disable_umc_index_mode(adev);
+
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_6_reg_offset(adev,
+                                               umc_inst,
+                                               ch_inst);
+
+               umc_v6_1_clear_error_count_per_channel(adev,
+                                               umc_reg_offset);
+       }
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_enable_umc_index_mode(adev);
+}
+
 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
                                                   uint32_t umc_reg_offset,
                                                   unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
        ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                                        EccErrCntCsSel, 0);
        WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
        ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
        *error_count +=
                (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
                 UMC_V6_1_CE_CNT_INIT);
-       /* clear the lower chip err count */
-       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
        /* select the higher chip and check the err counter */
        ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                                        EccErrCntCsSel, 1);
        WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
        ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
        *error_count +=
                (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
                 UMC_V6_1_CE_CNT_INIT);
-       /* clear the higher chip err count */
-       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
        /* check for SRAM correctable error
          MCUMC_STATUS is a 64 bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 
        if (rsmu_umc_index_state)
                umc_v6_1_enable_umc_index_mode(adev);
+
+       umc_v6_1_clear_error_count(adev);
 }
 
 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
index 82abd8e728ab6c4794428ccd01720b7d5d7f0091..3cafba7265876fa30b2b17ac897e0996d21dcb84 100644 (file)
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -210,13 +211,10 @@ done:
 static int uvd_v4_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index 0fa8aae2d78ebabffd9cdf52b0396dcbb153ae42..a566ff926e90dbfa0caf4de0139036c2fc308fde 100644 (file)
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -208,13 +209,10 @@ done:
 static int uvd_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index e0aadcaf6c8b3cb5629ae5605de26d2fe4a53074..0a880bc101b8470fc3116f254082229fdc333d61 100644 (file)
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
                        sprintf(ring->name, "uvd_enc%d", i);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst->irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -535,13 +540,10 @@ done:
 static int uvd_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index 0995378d8263c495799068bee36b95c00874fe1c..7a55457e6f9e091a418ad76213ead235f516ef02 100644 (file)
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
                if (!amdgpu_sriov_vf(adev)) {
                        ring = &adev->uvd.inst[j].ring;
                        sprintf(ring->name, "uvd_%d", ring->me);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
                                else
                                        ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
                        }
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -598,7 +604,6 @@ done:
 static int uvd_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int i;
 
        if (!amdgpu_sriov_vf(adev))
                uvd_v7_0_stop(adev);
@@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle)
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
        }
 
-       for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
-               if (adev->uvd.harvest_config & (1 << i))
-                       continue;
-               adev->uvd.inst[i].ring.sched.ready = false;
-       }
-
        return 0;
 }
 
@@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+       bool enable = (state == AMD_CG_STATE_GATE);
 
        uvd_v7_0_set_bypass_mode(adev, enable);
 
index b6837fcfdba7b97e81c934472238ebb51c2bb3b1..0e2945baf0f157430452faccb5a184d7d7505b79 100644 (file)
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512,
-                                    &adev->vce.irq, 0);
+                                    &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
index 217db187207c766c08a949d4f006c11d70c4cad6..6d9108fa22e0f21e08ffee13b2f78f1de1f069ec 100644 (file)
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
index 3fd102efb7afe63df7c2ee018ed2c328a856e27e..a0fb119240f40d0b6a7742626d1858cce7505d06 100644 (file)
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
                        else
                                ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
                }
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle)
 static int vce_v4_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int i;
 
        if (!amdgpu_sriov_vf(adev)) {
                /* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle)
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
        }
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].sched.ready = false;
-
        return 0;
 }
 
index 09b0572b838d29d6a758a570555722b03cbba341..1ad79155ed00fbafe00d6d2b80306b6d14be0299 100644 (file)
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
 
        ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -227,14 +229,11 @@ done:
 static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                RREG32_SOC15(VCN, 0, mmUVD_STATUS))
                vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index ec8091a661df140e8ffcc6d24d1db85225cc4232..90ed773695eaffd5f9c60b7cf781c2316a11e9c0 100644 (file)
@@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
        struct amdgpu_ring *ring;
        int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared;
 
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
                else
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle)
        if (r)
                return r;
 
+       fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+       fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
        return 0;
 }
 
@@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle)
 {
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+
+       fw_shared->present_flag_0 = 0;
 
        amdgpu_virt_free_mm_table(adev);
 
@@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle)
        if (r)
                goto done;
 
+       //Disable vcn decode for sriov
+       if (amdgpu_sriov_vf(adev))
+               ring->sched.ready = false;
+
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.inst->ring_enc[i];
                r = amdgpu_ring_test_helper(ring);
@@ -248,21 +260,12 @@ done:
 static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
-       int i;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
                vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
-       for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.inst->ring_enc[i];
-               ring->sched.ready = false;
-       }
-
        return 0;
 }
 
@@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
+       /* non-cache window */
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+               lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+               upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+
        WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 }
 
@@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 
        /* non-cache window */
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+               lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+               upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
@@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
 
@@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
+       /* Stall DPG before WPTR/RPTR reset */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+               UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+               ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+
        /* set the write pointer delay */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
@@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));
 
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+       /* Unstall DPG */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+               0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
        return 0;
 }
 
 static int vcn_v2_0_start(struct amdgpu_device *adev)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
@@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
        /* programm the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
@@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+       fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+       fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+       fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+       fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
        return 0;
 }
@@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
                                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
                        if (!ret_code) {
+                               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
                                /* pause DPG */
                                reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
                                WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
+                               /* Stall DPG before WPTR/RPTR reset */
+                               WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+                                          UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+                                          ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
                                /* Restore */
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[0];
+                               ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[1];
+                               ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                               fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                           RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+                               fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+                               /* Unstall DPG */
+                               WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+                                          0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
                                           UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
@@ -1796,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
        uint32_t table_size = 0;
        struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
        struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
-       struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
        struct mmsch_v2_0_cmd_end end = { {0} };
        struct mmsch_v2_0_init_header *header;
        uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@@ -1806,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
        direct_rd_mod_wt.cmd_header.command_type =
                MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
-       direct_poll.cmd_header.command_type =
-               MMSCH_COMMAND__DIRECT_REG_POLLING;
        end.cmd_header.command_type = MMSCH_COMMAND__END;
 
        if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
index c6363f5ad564026bc1f9f2d844a6bdd07cdc4630..34ed906645c9e8233409c40d72974b77ce9f4c29 100644 (file)
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
 
                        adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
                        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-                               harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+                               harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
                                if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
                                        adev->vcn.harvest_config |= 1 << i;
                        }
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
                return r;
 
        for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+               volatile struct amdgpu_fw_shared *fw_shared;
+
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
                adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
                adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+               adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
                adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+               adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
                adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+               adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
                adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+               adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
                adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+               adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
 
                ring = &adev->vcn.inst[j].ring_dec;
                ring->use_doorbell = true;
@@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
                                (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
                sprintf(ring->name, "vcn_dec_%d", j);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+                                    0, AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
@@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle)
                                        (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
 
                        sprintf(ring->name, "vcn_enc_%d.%d", j, i);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->vcn.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
+
+               fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+               fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
        }
 
        if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle)
  */
 static int vcn_v2_5_sw_fini(void *handle)
 {
-       int r;
+       int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared;
+
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+               if (adev->vcn.harvest_config & (1 << i))
+                       continue;
+               fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+               fw_shared->present_flag_0 = 0;
+       }
 
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_free_mm_table(adev);
@@ -308,25 +324,16 @@ done:
 static int vcn_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
-       int i, j;
+       int i;
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
-               ring = &adev->vcn.inst[i].ring_dec;
 
                if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
                     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
                        vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-               ring->sched.ready = false;
-
-               for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-                       ring = &adev->vcn.inst[i].ring_enc[j];
-                       ring->sched.ready = false;
-               }
        }
 
        return 0;
@@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
                        continue;
                /* cache window 0: fw */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
-                       WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+                       WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
                        offset = 0;
                } else {
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                lower_32_bits(adev->vcn.inst[i].gpu_addr));
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                upper_32_bits(adev->vcn.inst[i].gpu_addr));
                        offset = size;
-                       WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+                       WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
                                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
                }
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
 
                /* cache window 1: stack */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
                /* cache window 2: context */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+               /* non-cache window */
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+                       lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+                       upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+                       AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
        }
 }
 
@@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                if (!indirect) {
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+                               VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                } else {
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+                               VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                }
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
        }
 
        if (!indirect)
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
        else
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
 
        /* cache window 1: stack */
        if (!indirect) {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        }
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
 
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+               VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+               VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
 
        /* non-cache window */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+               lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+               upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+               VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
                 UVD_CGC_CTRL__VCPU_MODE_MASK |
                 UVD_CGC_CTRL__MMSCH_MODE_MASK);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+               VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
 
        /* turn off clock gating */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+               VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
 
        /* turn on SUVD clock gating */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+               VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
 
        /* turn on sw mode in UVD_SUVD_CGC_CTRL */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+               VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
 }
 
 /**
@@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
        struct amdgpu_ring *ring;
        uint32_t rb_bufsz, tmp;
 
        /* disable register anti-hang mechanism */
-       WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
                ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        /* enable dynamic power gating mode */
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
        tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
        tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
-       WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
 
        if (indirect)
                adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
        /* disable master interupt */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+               VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
 
        /* setup mmUVD_LMI_CTRL */
        tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
                (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                0x00100000L);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+               VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_CNTL),
+               VCN, 0, mmUVD_MPC_CNTL),
                0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUXA0),
+               VCN, 0, mmUVD_MPC_SET_MUXA0),
                ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUXB0),
+               VCN, 0, mmUVD_MPC_SET_MUXB0),
                ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUX),
+               VCN, 0, mmUVD_MPC_SET_MUX),
                ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+               VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+               VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
 
        /* enable LMI MC and UMC channels */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
 
        /* unblock VCPU register access */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+               VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
 
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
        /* enable master interrupt */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MASTINT_EN),
+               VCN, 0, mmUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
        if (indirect)
@@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+       /* Stall DPG before WPTR/RPTR reset */
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+               UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+               ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
 
        /* set the write pointer delay */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
        /* set the wb address */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
        /* programm the RB_BASE for ring buffer */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
-       WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                upper_32_bits(ring->gpu_addr));
 
        /* Initialize the ring buffer's read and write pointers */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
 
-       WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
 
-       ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+       ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));
 
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+       /* Unstall DPG */
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+               0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
        return 0;
 }
 
@@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                }
 
                /* disable register anti-hang mechanism */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
                /* set uvd status busy */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
-               WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+               WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
        }
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* enable VCPU clock */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
 
                /* disable master interrupt */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
                /* setup mmUVD_LMI_CTRL */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
                tmp &= ~0xff;
-               WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+               WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
                        UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                        UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
 
                /* setup mmUVD_MPC_CNTL */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
                tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
                tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
                WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
 
                /* setup UVD_MPC_SET_MUXA0 */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
                        ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
 
                /* setup UVD_MPC_SET_MUXB0 */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
                        ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
 
                /* setup mmUVD_MPC_SET_MUX */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
                        ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                        (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
        vcn_v2_5_mc_resume(adev);
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* VCN global tiling registers */
-               WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+               WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);
-               WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+               WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);
 
                /* enable LMI MC and UMC channels */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
                /* unblock VCPU register access */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
 
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                for (k = 0; k < 10; ++k) {
                        uint32_t status;
 
                        for (j = 0; j < 100; ++j) {
-                               status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+                               status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
                                if (status & 2)
                                        break;
                                if (amdgpu_emu_mode == 1)
@@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                                break;
 
                        DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
-                       WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+                       WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                                UVD_VCPU_CNTL__BLK_RST_MASK,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);
                        mdelay(10);
-                       WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+                       WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                        mdelay(10);
@@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                }
 
                /* enable master interrupt */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
                        UVD_MASTINT_EN__VCPU_EN_MASK,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
                /* clear the busy bit of VCN_STATUS */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
                        ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
 
                ring = &adev->vcn.inst[i].ring_dec;
                /* force RBC into idle state */
@@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
 
+               fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                /* programm the RB_BASE for ring buffer */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));
 
                /* Initialize the ring buffer's read and write pointers */
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
 
-               ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+               ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
                                lower_32_bits(ring->wptr));
-               ring = &adev->vcn.inst[i].ring_enc[0];
-               WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+               fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+               ring = &adev->vcn.inst[i].ring_enc[0];
+               WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                ring = &adev->vcn.inst[i].ring_enc[1];
-               WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+               WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
        }
 
        return 0;
@@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
         * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
         *  memory descriptor location
         */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 
        /* 2, update vmid of descriptor */
-       data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+       data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
        data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
        /* use domain0 for MM scheduler */
        data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
 
        /* 3, notify mmsch about the size of this descriptor */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
 
        /* 4, set resp to zero */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
 
        /*
         * 5, kick off the initialization and wait until
         * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
         */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
 
-       data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+       data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
        loop = 10;
        while ((data & 0x10000002) != 0x10000002) {
                udelay(100);
-               data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+               data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
                loop--;
                if (!loop)
                        break;
@@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
        uint32_t table_size = 0;
        struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
        struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
-       struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
        struct mmsch_v1_0_cmd_end end = { { 0 } };
        uint32_t *init_table = adev->virt.mm_table.cpu_addr;
        struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
 
        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
        direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
-       direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
        end.cmd_header.command_type = MMSCH_COMMAND__END;
 
        header->version = MMSCH_VERSION;
@@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
                table_size = 0;
 
                MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
                size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
                /* mc resume*/
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
                        offset = 0;
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+                               SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
                } else {
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                lower_32_bits(adev->vcn.inst[i].gpu_addr));
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                upper_32_bits(adev->vcn.inst[i].gpu_addr));
                        offset = size;
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+                               SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
                                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
                }
 
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
                        size);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
                        0);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
                        AMDGPU_VCN_STACK_SIZE);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
                        0);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
                        AMDGPU_VCN_CONTEXT_SIZE);
 
                ring = &adev->vcn.inst[i].ring_enc[0];
                ring->wptr = 0;
 
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
                        lower_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
                        upper_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
                        ring->ring_size / 4);
 
                ring = &adev->vcn.inst[i].ring_dec;
                ring->wptr = 0;
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
                        lower_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
                        upper_32_bits(ring->gpu_addr));
 
@@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
 
                /* add end packet */
                memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
        uint32_t tmp;
 
        /* Wait for power status to be 1 */
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
        /* wait for read ptr to be equal to write ptr */
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
 
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
        /* disable dynamic power gating mode */
-       WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
        return 0;
@@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
                        return r;
 
                /* block VCPU register access */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
                        UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
 
                /* reset VCPU */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__BLK_RST_MASK,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                /* disable VCPU clock */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                        ~(UVD_VCPU_CNTL__CLK_EN_MASK));
 
                /* clear status */
@@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
                vcn_v2_5_enable_clock_gating(adev);
 
                /* enable register anti-hang mechanism */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
                        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        }
@@ -1365,55 +1402,74 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 {
        struct amdgpu_ring *ring;
        uint32_t reg_data = 0;
-       int ret_code;
+       int ret_code = 0;
 
        /* pause/unpause if state is changed */
        if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
                DRM_DEBUG("dpg pause state changed %d -> %d",
                        adev->vcn.inst[inst_idx].pause_state.fw_based,  new_state->fw_based);
-               reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+               reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
                        (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 
                if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-                       ret_code = 0;
-                       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+                       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
                                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
                        if (!ret_code) {
+                               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
                                /* pause DPG */
                                reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
 
                                /* wait for ACK */
-                               SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+                               SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
+                               /* Stall DPG before WPTR/RPTR reset */
+                               WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+                                          UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+                                          ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
                                /* Restore */
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst[inst_idx].ring_enc[0];
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
+                               ring->wptr = 0;
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst[inst_idx].ring_enc[1];
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
-                                          RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-
-                               SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+                               ring->wptr = 0;
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+                               fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+                                          RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+                               fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+                               /* Unstall DPG */
+                               WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+                                          0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+                               SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
                                           UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
                        }
                } else {
-                       /* unpause dpg, no need to wait */
                        reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-                       WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                       WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+                               UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
                }
                adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
        }
@@ -1432,7 +1488,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+       return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
 }
 
 /**
@@ -1449,7 +1505,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
        if (ring->use_doorbell)
                return adev->wb.wb[ring->wptr_offs];
        else
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
 }
 
 /**
@@ -1464,14 +1520,14 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-               WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+               WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
                        lower_32_bits(ring->wptr) | 0x80000000);
 
        if (ring->use_doorbell) {
                adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
-               WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
        }
 }
 
@@ -1517,9 +1573,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
 
        if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
        else
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
 }
 
 /**
@@ -1537,12 +1593,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
-                       return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+                       return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
        } else {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
-                       return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+                       return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
        }
 }
 
@@ -1562,14 +1618,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
-                       WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                }
        } else {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
-                       WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                }
        }
 }
index 78b35901643bcf8707cdaa1a7a60e4cbfbeb5771..af8986a553544d97312ab8d1d3848b9a3bdfc961 100644 (file)
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       uint32_t reg = 0;
-
-       if (adev->asic_type == CHIP_TONGA ||
-           adev->asic_type == CHIP_FIJI) {
-              reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-              /* bit0: 0 means pf and 1 means vf */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-              /* bit31: 0 means disable IOV and 1 means enable */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-       }
-
-       if (reg == 0) {
-               if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS},
        {mmGRBM_STATUS2},
@@ -765,8 +744,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
        int r;
 
        if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-               if (!adev->in_suspend)
-                       amdgpu_inc_vram_lost(adev);
                r = amdgpu_dpm_baco_reset(adev);
        } else {
                r = vi_asic_pci_config_reset(adev);
@@ -1730,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-       /* in early init stage, vbios code won't work */
-       vi_detect_hw_virtualization(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_vi_virt_ops;
 
index 19ddd2312e00d4c8f0178d4fe6130d8b80fb09b8..7a01e6133798014e9a5e290fa9d3d8003af21069 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
index 0ec5f25adf56b134461685e7cf8516caa681690f..cf0017f4d9d5ba99febc4ab1d109882643a286b6 100644 (file)
@@ -215,6 +215,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        }
 
        q_properties->is_interop = false;
+       q_properties->is_gws = false;
        q_properties->queue_percent = args->queue_percentage;
        q_properties->priority = args->queue_priority;
        q_properties->queue_address = args->ring_base_address;
@@ -1322,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                goto err_free;
        }
 
+       /* Update the VRAM usage count */
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+               WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+
        mutex_unlock(&p->mutex);
 
        args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1337,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        return 0;
 
 err_free:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
        return err;
@@ -1351,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        void *mem;
        struct kfd_dev *dev;
        int ret;
+       uint64_t size = 0;
 
        dev = kfd_device_by_id(GET_GPU_ID(args->handle));
        if (!dev)
@@ -1373,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        }
 
        ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
-                                               (struct kgd_mem *)mem);
+                                               (struct kgd_mem *)mem, &size);
 
        /* If freeing the buffer failed, leave the handle in place for
         * clean-up during process tear-down.
@@ -1382,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
                kfd_process_device_remove_obj_handle(
                        pdd, GET_IDR_HANDLE(args->handle));
 
+       WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+
 err_unlock:
        mutex_unlock(&p->mutex);
        return ret;
@@ -1584,6 +1592,45 @@ copy_from_user_failed:
        return err;
 }
 
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+               struct kfd_process *p, void *data)
+{
+       int retval;
+       struct kfd_ioctl_alloc_queue_gws_args *args = data;
+       struct queue *q;
+       struct kfd_dev *dev;
+
+       mutex_lock(&p->mutex);
+       q = pqm_get_user_queue(&p->pqm, args->queue_id);
+
+       if (q) {
+               dev = q->device;
+       } else {
+               retval = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (!dev->gws) {
+               retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+               retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+       mutex_unlock(&p->mutex);
+
+       args->first_gws = 0;
+       return retval;
+
+out_unlock:
+       mutex_unlock(&p->mutex);
+       return retval;
+}
+
 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
                struct kfd_process *p, void *data)
 {
@@ -1687,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
        return 0;
 
 err_free:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
        return r;
@@ -1786,6 +1833,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
                                kfd_ioctl_import_dmabuf, 0),
 
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+                       kfd_ioctl_alloc_queue_gws, 0),
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
index de9f68d5c31278ea80e3456716cebd789bd0d54a..1009a3b8dcc2cc7e0ad06ce8a8713b0519d5a4bc 100644 (file)
@@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
        num_nodes = crat_table->num_domains;
        image_len = crat_table->length;
 
-       pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+       pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
 
        for (node_id = 0; node_id < num_nodes; node_id++) {
                top_dev = kfd_create_topology_device(device_list);
index 05bc6d96ec5275b00a5b1eb9a69c506eee43ce44..0491ab2b4a9b3bb7746c45693d329669157b4b38 100644 (file)
@@ -569,6 +569,23 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
        }
 }
 
+static int kfd_gws_init(struct kfd_dev *kfd)
+{
+       int ret = 0;
+
+       if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+               return 0;
+
+       if (hws_gws_support
+               || (kfd->device_info->asic_family >= CHIP_VEGA10
+                       && kfd->device_info->asic_family <= CHIP_RAVEN
+                       && kfd->mec2_fw_version >= 0x1b3))
+               ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
+                               amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+
+       return ret;
+}
+
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         struct drm_device *ddev,
                         const struct kgd2kfd_shared_resources *gpu_resources)
@@ -578,6 +595,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->ddev = ddev;
        kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
                        KGD_ENGINE_MEC1);
+       kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+                       KGD_ENGINE_MEC2);
        kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
                        KGD_ENGINE_SDMA1);
        kfd->shared_resources = *gpu_resources;
@@ -598,13 +617,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        } else
                kfd->max_proc_per_quantum = hws_max_conc_proc;
 
-       /* Allocate global GWS that is shared by all KFD processes */
-       if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
-                       amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
-               dev_err(kfd_device, "Could not allocate %d gws\n",
-                       amdgpu_amdkfd_get_num_gws(kfd->kgd));
-               goto out;
-       }
        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
                        kfd->device_info->mqd_size_aligned;
@@ -662,6 +674,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto device_queue_manager_error;
        }
 
+       /* If supported on this device, allocate global GWS that is shared
+        * by all KFD processes
+        */
+       if (kfd_gws_init(kfd)) {
+               dev_err(kfd_device, "Could not allocate %d gws\n",
+                       amdgpu_amdkfd_get_num_gws(kfd->kgd));
+               goto gws_error;
+       }
+
        if (kfd_iommu_device_init(kfd)) {
                dev_err(kfd_device, "Error initializing iommuv2\n");
                goto device_iommu_error;
@@ -691,6 +712,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 kfd_topology_add_device_error:
 kfd_resume_error:
 device_iommu_error:
+gws_error:
        device_queue_manager_uninit(kfd->dqm);
 device_queue_manager_error:
        kfd_interrupt_exit(kfd);
@@ -701,7 +723,7 @@ kfd_doorbell_error:
 kfd_gtt_sa_init_error:
        amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 alloc_gtt_mem_failure:
-       if (hws_gws_support)
+       if (kfd->gws)
                amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
@@ -720,7 +742,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
-               if (hws_gws_support)
+               if (kfd->gws)
                        amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
        }
 
index 77ea0f0cb163b93d2819368214592b50e7466fe0..e9c4867abeffba32f3034d06e41a57a5709227dd 100644 (file)
@@ -505,8 +505,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                deallocate_vmid(dqm, qpd, q);
        }
        qpd->queue_count--;
-       if (q->properties.is_active)
+       if (q->properties.is_active) {
                decrement_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
+       }
 
        return retval;
 }
@@ -583,6 +588,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        else if (!q->properties.is_active && prev_active)
                decrement_queue_count(dqm, q->properties.type);
 
+       if (q->gws && !q->properties.is_gws) {
+               if (q->properties.is_active) {
+                       dqm->gws_queue_count++;
+                       pdd->qpd.mapped_gws_queue = true;
+               }
+               q->properties.is_gws = true;
+       } else if (!q->gws && q->properties.is_gws) {
+               if (q->properties.is_active) {
+                       dqm->gws_queue_count--;
+                       pdd->qpd.mapped_gws_queue = false;
+               }
+               q->properties.is_gws = false;
+       }
+
        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
@@ -631,6 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                                q->properties.type)];
                q->properties.is_active = false;
                decrement_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
 
                if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
                        continue;
@@ -744,6 +767,10 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                q->properties.type)];
                q->properties.is_active = true;
                increment_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count++;
+                       qpd->mapped_gws_queue = true;
+               }
 
                if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
                        continue;
@@ -913,6 +940,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->active_cp_queue_count = 0;
+       dqm->gws_queue_count = 0;
 
        for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
                int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1061,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
                        break;
                }
 
-               res.queue_mask |= (1ull << i);
+               res.queue_mask |= 1ull
+                       << amdgpu_queue_mask_bit_to_set_resource_bit(
+                               (struct amdgpu_device *)dqm->dev->kgd, i);
        }
        res.gws_mask = ~0ull;
        res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1082,7 +1112,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->processes_count = 0;
        dqm->active_cp_queue_count = 0;
-
+       dqm->gws_queue_count = 0;
        dqm->active_runlist = false;
        dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
        dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1432,6 +1462,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval == -ETIME)
                        qpd->reset_wavefronts = true;
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
        }
 
        /*
@@ -1650,8 +1684,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                        deallocate_sdma_queue(dqm, q);
 
-               if (q->properties.is_active)
+               if (q->properties.is_active) {
                        decrement_queue_count(dqm, q->properties.type);
+                       if (q->properties.is_gws) {
+                               dqm->gws_queue_count--;
+                               qpd->mapped_gws_queue = false;
+                       }
+               }
 
                dqm->total_queue_count--;
        }
index 50d919f814e9add4e77c4747aef9bf16677e1694..4afa015c69b11cf5c9bf4ce114a33359745aba28 100644 (file)
@@ -182,6 +182,7 @@ struct device_queue_manager {
        unsigned int            processes_count;
        unsigned int            active_queue_count;
        unsigned int            active_cp_queue_count;
+       unsigned int            gws_queue_count;
        unsigned int            total_queue_count;
        unsigned int            next_pipe_to_allocate;
        unsigned int            *allocated_queues;
index bae706462f962790be6ef3d463605c9f08a01ee6..a2b77d1df8540b516e2335b06544b07f9f70fda0 100644 (file)
@@ -126,6 +126,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 
        prop.queue_size = queue_size;
        prop.is_interop = false;
+       prop.is_gws = false;
        prop.priority = 1;
        prop.queue_percent = 100;
        prop.type = type;
index efdb75e7677b393a3173c5803a6047c78bb5b9d5..685ca82d42fe8a81a231dc8902c318103d80c7ed 100644 (file)
@@ -41,7 +41,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                                unsigned int *rlib_size,
                                bool *over_subscription)
 {
-       unsigned int process_count, queue_count, compute_queue_count;
+       unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
        unsigned int map_queue_size;
        unsigned int max_proc_per_quantum = 1;
        struct kfd_dev *dev = pm->dqm->dev;
@@ -49,6 +49,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
        process_count = pm->dqm->processes_count;
        queue_count = pm->dqm->active_queue_count;
        compute_queue_count = pm->dqm->active_cp_queue_count;
+       gws_queue_count = pm->dqm->gws_queue_count;
 
        /* check if there is over subscription
         * Note: the arbitration between the number of VMIDs and
@@ -61,7 +62,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                max_proc_per_quantum = dev->max_proc_per_quantum;
 
        if ((process_count > max_proc_per_quantum) ||
-           compute_queue_count > get_cp_queues_num(pm->dqm)) {
+           compute_queue_count > get_cp_queues_num(pm->dqm) ||
+           gws_queue_count > 1) {
                *over_subscription = true;
                pr_debug("Over subscribed runlist\n");
        }
index 2de01009f1b6d6efc623b1800c8e60d260179589..bdca9dc5f1181bdeebd8467b980e17f5d18ec583 100644 (file)
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
        packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
-       packet->bitfields14.num_gws = qpd->num_gws;
+       packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
        packet->bitfields14.num_oac = qpd->num_oac;
        packet->bitfields14.sdma_enable = 1;
        packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
index 4a3049841086d826ea8d798606a7af82dd44ffc1..cde5e4c7caa166e9a648596d234d9c0913c66247 100644 (file)
@@ -282,6 +282,7 @@ struct kfd_dev {
 
        /* Firmware versions */
        uint16_t mec_fw_version;
+       uint16_t mec2_fw_version;
        uint16_t sdma_fw_version;
 
        /* Maximum process number mapped to HW scheduler */
@@ -410,6 +411,10 @@ enum KFD_QUEUE_PRIORITY {
  * @is_active: Defines if the queue is active or not. @is_active and
  * @is_evicted are protected by the DQM lock.
  *
+ * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
+ * @is_gws should be protected by the DQM lock, since changing it can yield the
+ * possibility of updating DQM state on number of GWS queues.
+ *
  * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
  * of the queue.
  *
@@ -432,6 +437,7 @@ struct queue_properties {
        bool is_interop;
        bool is_evicted;
        bool is_active;
+       bool is_gws;
        /* Not relevant for user mode queues in cp scheduling */
        unsigned int vmid;
        /* Relevant only for sdma queues*/
@@ -563,6 +569,14 @@ struct qcm_process_device {
         */
        bool reset_wavefronts;
 
+       /* This flag tells us if this process has a GWS-capable
+        * queue that will be mapped into the runlist. It's
+        * possible to request a GWS BO, but not have the queue
+        * currently mapped, and this changes how the MAP_PROCESS
+        * PM4 packet is configured.
+        */
+       bool mapped_gws_queue;
+
        /*
         * All the memory management data should be here too
         */
@@ -615,6 +629,8 @@ enum kfd_pdd_bound {
        PDD_BOUND_SUSPENDED,
 };
 
+#define MAX_VRAM_FILENAME_LEN 11
+
 /* Data that is per-process-per device. */
 struct kfd_process_device {
        /*
@@ -657,6 +673,11 @@ struct kfd_process_device {
 
        /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
        enum kfd_pdd_bound bound;
+
+       /* VRAM usage */
+       uint64_t vram_usage;
+       struct attribute attr_vram;
+       char vram_filename[MAX_VRAM_FILENAME_LEN];
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -923,6 +944,8 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
                        void *gws);
 struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
                                                unsigned int qid);
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+                                               unsigned int qid);
 int pqm_get_wave_state(struct process_queue_manager *pqm,
                       unsigned int qid,
                       void __user *ctl_stack,
index fe0cd49d4ea7ce1708dfc00e2388a4bd18dd8c9b..d27221ddcdeb3d920021431ad37a542a3a85ecea 100644 (file)
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
 {
-       int val = 0;
-
        if (strcmp(attr->name, "pasid") == 0) {
                struct kfd_process *p = container_of(attr, struct kfd_process,
                                                     attr_pasid);
-               val = p->pasid;
+
+               return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+       } else if (strncmp(attr->name, "vram_", 5) == 0) {
+               struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+                                                             attr_vram);
+               if (pdd)
+                       return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
        } else {
                pr_err("Invalid attribute");
                return -EINVAL;
        }
 
-       return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+       return 0;
 }
 
 static void kfd_procfs_kobj_release(struct kobject *kobj)
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
        return 0;
 }
 
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+       int ret = 0;
+       struct kfd_process_device *pdd;
+
+       if (!p)
+               return -EINVAL;
+
+       if (!p->kobj)
+               return -EFAULT;
+
+       /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+                        pdd->dev->id);
+               pdd->attr_vram.name = pdd->vram_filename;
+               pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&pdd->attr_vram);
+               ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+               if (ret)
+                       pr_warn("Creating vram usage for gpu id %d failed",
+                               (int)pdd->dev->id);
+       }
+
+       return ret;
+}
+
+
 void kfd_procfs_del_queue(struct queue *q)
 {
        if (!q)
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
        struct kfd_dev *dev = pdd->dev;
 
        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
 }
 
 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
        return err;
 
 err_map_mem:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
 err_alloc_mem:
        *kptr = NULL;
        return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
                                                        process->kobj);
                if (!process->kobj_queues)
                        pr_warn("Creating KFD proc/queues folder failed");
+
+               ret = kfd_procfs_add_vram_usage(process);
+               if (ret)
+                       pr_warn("Creating vram usage file for pid %d failed",
+                               (int)process->lead_thread->pid);
        }
 out:
        if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }
 
-               amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+               amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
 }
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
+       struct kfd_process_device *pdd;
 
        /* Remove the procfs files */
        if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
                kobject_del(p->kobj_queues);
                kobject_put(p->kobj_queues);
                p->kobj_queues = NULL;
+
+               list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+                       sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
                kobject_del(p->kobj);
                kobject_put(p->kobj);
                p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
+       pdd->qpd.mapped_gws_queue = false;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        pdd->runtime_inuse = false;
+       pdd->vram_usage = 0;
        list_add(&pdd->per_device_list, &p->per_device_data);
 
        /* Init idr used for memory handle translation */
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
        return p;
 }
 
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
  *
  * Eviction is reference-counted per process-device. This means multiple
  * evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
        return r;
 }
 
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
 int kfd_process_restore_queues(struct kfd_process *p)
 {
        struct kfd_process_device *pdd;
index 084c35f55d591f025d95053effe2d61b2117bf46..eb1635ac89887c18534e1ce1e969469386fcbf45 100644 (file)
@@ -476,6 +476,15 @@ struct kernel_queue *pqm_get_kernel_queue(
        return NULL;
 }
 
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+                                       unsigned int qid)
+{
+       struct process_queue_node *pqn;
+
+       pqn = get_queue_by_qid(pqm, qid);
+       return pqn ? pqn->q : NULL;
+}
+
 int pqm_get_wave_state(struct process_queue_manager *pqm,
                       unsigned int qid,
                       void __user *ctl_stack,
index aa0bfa78a66741be2fbb0dfcb8b598d608119498..bb77f7af2b6d9e3f37ce33d56614952021dbd01c 100644 (file)
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.device_id);
        sysfs_show_32bit_prop(buffer, "location_id",
                        dev->node_props.location_id);
+       sysfs_show_32bit_prop(buffer, "domain",
+                       dev->node_props.domain);
        sysfs_show_32bit_prop(buffer, "drm_render_minor",
                        dev->node_props.drm_render_minor);
        sysfs_show_64bit_prop(buffer, "hive_id",
@@ -787,7 +789,6 @@ static int kfd_topology_update_sysfs(void)
 {
        int ret;
 
-       pr_info("Creating topology SYSFS entries\n");
        if (!sys_props.kobj_topology) {
                sys_props.kobj_topology =
                                kfd_alloc_struct(sys_props.kobj_topology);
@@ -1048,7 +1049,6 @@ int kfd_topology_init(void)
                sys_props.generation_count++;
                kfd_update_system_properties();
                kfd_debug_print_topology();
-               pr_info("Finished initializing topology\n");
        } else
                pr_err("Failed to update topology in sysfs ret=%d\n", ret);
 
@@ -1303,7 +1303,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
        dev->node_props.vendor_id = gpu->pdev->vendor;
        dev->node_props.device_id = gpu->pdev->device;
+       dev->node_props.capability |=
+               ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
+                       HSA_CAP_ASIC_REVISION_SHIFT) &
+                       HSA_CAP_ASIC_REVISION_MASK);
        dev->node_props.location_id = pci_dev_id(gpu->pdev);
+       dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
        dev->node_props.max_engine_clk_fcompute =
                amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
        dev->node_props.max_engine_clk_ccompute =
@@ -1317,7 +1322,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
                                gpu->device_info->num_xgmi_sdma_engines;
        dev->node_props.num_sdma_queues_per_engine =
                                gpu->device_info->num_sdma_queues_per_engine;
-       dev->node_props.num_gws = (hws_gws_support &&
+       dev->node_props.num_gws = (dev->gpu->gws &&
                dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
                amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
        dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
index 46eeecaf1b68ee67506524c196a7c8c747b3e2aa..326d9b26b7aa7fbbab553671ee1a0d348739d8bf 100644 (file)
@@ -41,7 +41,6 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT   8
 #define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK   0x00003000
 #define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT  12
-#define HSA_CAP_RESERVED                       0xffffc000
 
 #define HSA_CAP_DOORBELL_TYPE_PRE_1_0          0x0
 #define HSA_CAP_DOORBELL_TYPE_1_0              0x1
 #define HSA_CAP_SRAM_EDCSUPPORTED              0x00080000
 #define HSA_CAP_MEM_EDCSUPPORTED               0x00100000
 #define HSA_CAP_RASEVENTNOTIFY                 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK             0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT            22
+
+#define HSA_CAP_RESERVED                       0xfc078000
 
 struct kfd_node_properties {
        uint64_t hive_id;
@@ -77,6 +80,7 @@ struct kfd_node_properties {
        uint32_t vendor_id;
        uint32_t device_id;
        uint32_t location_id;
+       uint32_t domain;
        uint32_t max_engine_clk_fcompute;
        uint32_t max_engine_clk_ccompute;
        int32_t  drm_render_minor;
index 87858bc57e640b33c7ab83021685a718ac06fe96..1911a34cc0602c88a21582e0ed507263279b8444 100644 (file)
@@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP
        bool "Enable HDCP support in DC"
        depends on DRM_AMD_DC
        help
-        Choose this option
-        if you want to support
-        HDCP authentication
+         Choose this option if you want to support HDCP authentication.
 
 config DEBUG_KERNEL_DC
        bool "Enable kgdb break in DC"
        depends on DRM_AMD_DC
        help
-         Choose this option
-         if you want to hit
-         kdgb_break in assert.
+         Choose this option if you want to hit kdgb_break in assert.
 
 endmenu
index f7c5cdc10a705b1b78cfb074596649f55d831639..48f2b3710e7ce88b7a36a2e3dbf759af0451085c 100644 (file)
@@ -30,7 +30,7 @@
 #include "dc.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
 #include "dc/inc/hw/dmcu.h"
 #include "dc/inc/hw/abm.h"
 #include "dc/dc_dmub_srv.h"
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
 /**
  * dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
  *
  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
  * event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
        unsigned long flags;
 
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
-       if (acrtc) {
-               acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
-               DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
-                             acrtc->crtc_id,
-                             amdgpu_dm_vrr_active(acrtc_state));
-
-               /* Core vblank handling at start of front-porch is only possible
-                * in non-vrr mode, as only there vblank timestamping will give
-                * valid results while done in front-porch. Otherwise defer it
-                * to dm_vupdate_high_irq after end of front-porch.
-                */
-               if (!amdgpu_dm_vrr_active(acrtc_state))
-                       drm_crtc_handle_vblank(&acrtc->base);
-
-               /* Following stuff must happen at start of vblank, for crc
-                * computation and below-the-range btr support in vrr mode.
-                */
-               amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
-               if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
-                   acrtc_state->vrr_params.supported &&
-                   acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-                       spin_lock_irqsave(&adev->ddev->event_lock, flags);
-                       mod_freesync_handle_v_update(
-                               adev->dm.freesync_module,
-                               acrtc_state->stream,
-                               &acrtc_state->vrr_params);
-
-                       dc_stream_adjust_vmin_vmax(
-                               adev->dm.dc,
-                               acrtc_state->stream,
-                               &acrtc_state->vrr_params.adjust);
-                       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-               }
-       }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
-       struct common_irq_params *irq_params = interrupt_params;
-       struct amdgpu_device *adev = irq_params->adev;
-       struct amdgpu_crtc *acrtc;
-       struct dm_crtc_state *acrtc_state;
-       unsigned long flags;
-
-       acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
        if (!acrtc)
                return;
 
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                         amdgpu_dm_vrr_active(acrtc_state),
                         acrtc_state->active_planes);
 
+       /**
+        * Core vblank handling at start of front-porch is only possible
+        * in non-vrr mode, as only there vblank timestamping will give
+        * valid results while done in front-porch. Otherwise defer it
+        * to dm_vupdate_high_irq after end of front-porch.
+        */
+       if (!amdgpu_dm_vrr_active(acrtc_state))
+               drm_crtc_handle_vblank(&acrtc->base);
+
+       /**
+        * Following stuff must happen at start of vblank, for crc
+        * computation and below-the-range btr support in vrr mode.
+        */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-       drm_crtc_handle_vblank(&acrtc->base);
+
+       /* BTR updates need to happen before VUPDATE on Vega and above. */
+       if (adev->family < AMDGPU_FAMILY_AI)
+               return;
 
        spin_lock_irqsave(&adev->ddev->event_lock, flags);
 
-       if (acrtc_state->vrr_params.supported &&
+       if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-               mod_freesync_handle_v_update(
-               adev->dm.freesync_module,
-               acrtc_state->stream,
-               &acrtc_state->vrr_params);
+               mod_freesync_handle_v_update(adev->dm.freesync_module,
+                                            acrtc_state->stream,
+                                            &acrtc_state->vrr_params);
 
-               dc_stream_adjust_vmin_vmax(
-                       adev->dm.dc,
-                       acrtc_state->stream,
-                       &acrtc_state->vrr_params.adjust);
+               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+                                          &acrtc_state->vrr_params.adjust);
        }
 
        /*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
-       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+       if (adev->family >= AMDGPU_FAMILY_RV &&
+           acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 }
-#endif
 
 static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
@@ -825,8 +774,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                                fw_inst_const_size);
        }
 
-       memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
-              fw_bss_data_size);
+       if (fw_bss_data_size)
+               memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+                      fw_bss_data, fw_bss_data_size);
 
        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@ -1265,6 +1215,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);
+       region_params.fw_inst_const =
+               adev->dm.dmub_fw->data +
+               le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+               PSP_HEADER_BYTES;
 
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
@@ -1384,9 +1338,14 @@ static int dm_late_init(void *handle)
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
-       struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+       struct dmcu *dmcu = NULL;
        bool ret = false;
 
+       if (!adev->dm.fw_dmcu)
+               return detect_mst_link_for_all_connectors(adev->ddev);
+
+       dmcu = adev->dm.dc->res_pool->dmcu;
+
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;
 
@@ -1566,7 +1525,6 @@ static int dm_suspend(void *handle)
 {
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
-       int ret = 0;
 
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
@@ -1578,7 +1536,7 @@ static int dm_suspend(void *handle)
 
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
-       return ret;
+       return 0;
 }
 
 static struct amdgpu_dm_connector *
@@ -2008,17 +1966,22 @@ void amdgpu_dm_update_connector_after_detect(
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
-                       drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+                       if (aconnector->dc_link->aux_mode) {
+                               drm_dp_cec_unset_edid(
+                                       &aconnector->dm_dp_aux.aux);
+                       }
                } else {
                        aconnector->edid =
-                               (struct edid *) sink->dc_edid.raw_edid;
-
+                               (struct edid *)sink->dc_edid.raw_edid;
 
                        drm_connector_update_edid_property(connector,
-                                       aconnector->edid);
-                       drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
-                                           aconnector->edid);
+                                                          aconnector->edid);
+
+                       if (aconnector->dc_link->aux_mode)
+                               drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+                                                   aconnector->edid);
                }
+
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
        } else {
@@ -2440,8 +2403,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;
 
+               amdgpu_dm_irq_register_interrupt(
+                       adev, &int_params, dm_crtc_high_irq, c_irq_params);
+       }
+
+       /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+        * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+        * to trigger at end of each vblank, regardless of state of the lock,
+        * matching DCE behaviour.
+        */
+       for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+            i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+            i++) {
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+               if (r) {
+                       DRM_ERROR("Failed to add vupdate irq id!\n");
+                       return r;
+               }
+
+               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+               int_params.irq_source =
+                       dc_interrupt_to_irq_source(dc, i, 0);
+
+               c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+               c_irq_params->adev = adev;
+               c_irq_params->irq_src = int_params.irq_source;
+
                amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                               dm_dcn_crtc_high_irq, c_irq_params);
+                               dm_vupdate_high_irq, c_irq_params);
        }
 
        /* Use GRPH_PFLIP interrupt */
@@ -3304,7 +3295,7 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 }
 
 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
-                      uint64_t *tiling_flags)
+                      uint64_t *tiling_flags, bool *tmz_surface)
 {
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
        int r = amdgpu_bo_reserve(rbo, false);
@@ -3319,6 +3310,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
        if (tiling_flags)
                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
 
+       if (tmz_surface)
+               *tmz_surface = amdgpu_bo_encrypted(rbo);
+
        amdgpu_bo_unreserve(rbo);
 
        return r;
@@ -3340,7 +3334,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
                          const union dc_tiling_info *tiling_info,
                          const uint64_t info,
                          struct dc_plane_dcc_param *dcc,
-                         struct dc_plane_address *address)
+                         struct dc_plane_address *address,
+                         bool force_disable_dcc)
 {
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
@@ -3352,6 +3347,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));
 
+       if (force_disable_dcc)
+               return 0;
+
        if (!offset)
                return 0;
 
@@ -3401,7 +3399,9 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
-                            struct dc_plane_address *address)
+                            struct dc_plane_address *address,
+                            bool tmz_surface,
+                            bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = &afb->base;
        int ret;
@@ -3411,6 +3411,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));
 
+       address->tmz_surface = tmz_surface;
+
        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
@@ -3507,7 +3509,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
                ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
                                                plane_size, tiling_info,
-                                               tiling_flags, dcc, address);
+                                               tiling_flags, dcc, address,
+                                               force_disable_dcc);
                if (ret)
                        return ret;
        }
@@ -3599,7 +3602,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
-                           struct dc_plane_address *address)
+                           struct dc_plane_address *address,
+                           bool tmz_surface,
+                           bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
@@ -3642,6 +3647,10 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        case DRM_FORMAT_P010:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
                break;
+       case DRM_FORMAT_XRGB16161616F:
+       case DRM_FORMAT_ARGB16161616F:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+               break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %s\n",
@@ -3681,7 +3690,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address);
+                                          &plane_info->dcc, address, tmz_surface,
+                                          force_disable_dcc);
        if (ret)
                return ret;
 
@@ -3704,6 +3714,8 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool tmz_surface = false;
+       bool force_disable_dcc = false;
 
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
        if (ret)
@@ -3714,13 +3726,16 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->clip_rect = scaling_info.clip_rect;
        dc_plane_state->scaling_quality = scaling_info.scaling_quality;
 
-       ret = get_fb_info(amdgpu_fb, &tiling_flags);
+       ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
        if (ret)
                return ret;
 
+       force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
-                                         &dc_plane_state->address);
+                                         &dc_plane_state->address,
+                                         tmz_surface,
+                                         force_disable_dcc);
        if (ret)
                return ret;
 
@@ -4324,14 +4339,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
-       if (stream->link->psr_feature_enabled)  {
+       if (stream->link->psr_settings.psr_feature_enabled)     {
                struct dc  *core_dc = stream->link->ctx->dc;
 
                if (dc_is_dmcu_initialized(core_dc)) {
-                       struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
-                       stream->psr_version = dmcu->dmcu_version.psr_version;
-
                        //
                        // should decide stream support vsc sdp colorimetry capability
                        // before building vsc info packet
@@ -4437,10 +4448,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int rc;
 
-       /* Do not set vupdate for DCN hardware */
-       if (adev->family > AMDGPU_FAMILY_AI)
-               return 0;
-
        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4664,6 +4671,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
                i2c_del_adapter(&aconnector->i2c->base);
                kfree(aconnector->i2c);
        }
+       kfree(aconnector->dm_dp_aux.aux.name);
 
        kfree(connector);
 }
@@ -4726,6 +4734,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
 #if defined(CONFIG_DEBUG_FS)
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
+       int r;
+
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+               amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+               r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+               if (r)
+                       return r;
+       }
 
        connector_debugfs_init(amdgpu_dm_connector);
 #endif
@@ -5332,6 +5349,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool tmz_surface = false;
+       bool force_disable_dcc = false;
 
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);
@@ -5380,6 +5399,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 
        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
 
+       tmz_surface = amdgpu_bo_encrypted(rbo);
+
        ttm_eu_backoff_reservation(&ticket, &list);
 
        afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5390,11 +5411,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
+               force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address);
+                       &plane_state->address, tmz_surface,
+                       force_disable_dcc);
        }
 
        return 0;
@@ -5540,6 +5563,10 @@ static int get_plane_formats(const struct drm_plane *plane,
                        formats[num_formats++] = DRM_FORMAT_NV12;
                if (plane_cap && plane_cap->pixel_format_support.p010)
                        formats[num_formats++] = DRM_FORMAT_P010;
+               if (plane_cap && plane_cap->pixel_format_support.fp16) {
+                       formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
+                       formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+               }
                break;
 
        case DRM_PLANE_TYPE_OVERLAY:
@@ -6092,7 +6119,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
-               amdgpu_dm_initialize_dp_connector(dm, aconnector);
+               amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
 
 out_free:
        if (res) {
@@ -6567,6 +6594,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        unsigned long flags;
        struct amdgpu_bo *abo;
        uint64_t tiling_flags;
+       bool tmz_surface = false;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
@@ -6619,6 +6647,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+                       bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
                }
 
                fill_dc_scaling_info(new_plane_state,
@@ -6661,12 +6690,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
                amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 
+               tmz_surface = amdgpu_bo_encrypted(abo);
+
                amdgpu_bo_unreserve(abo);
 
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
-                       &bundle->flip_addrs[planes_count].address);
+                       &bundle->flip_addrs[planes_count].address,
+                       tmz_surface,
+                       false);
+
+               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+                                new_plane_state->plane->index,
+                                bundle->plane_infos[planes_count].dcc.enable);
 
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
@@ -6807,7 +6844,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_allow_active)
+                               acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);
 
                dc_commit_updates_for_stream(dm->dc,
@@ -6818,12 +6855,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                                     dc_state);
 
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->psr_version &&
-                                               !acrtc_state->stream->link->psr_feature_enabled)
+                               acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+                               !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->link->psr_feature_enabled &&
-                                               !acrtc_state->stream->link->psr_allow_active) {
+                               acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+                               !acrtc_state->stream->link->psr_settings.psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }
 
@@ -7137,7 +7174,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream) {
-                               if (dm_old_crtc_state->stream->link->psr_allow_active)
+                               if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
                                        amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
 
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -7848,6 +7885,7 @@ static int dm_update_plane_state(struct dc *dc,
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+       struct amdgpu_crtc *new_acrtc;
        bool needs_reset;
        int ret = 0;
 
@@ -7857,9 +7895,30 @@ static int dm_update_plane_state(struct dc *dc,
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);
 
-       /*TODO Implement atomic check for cursor plane */
-       if (plane->type == DRM_PLANE_TYPE_CURSOR)
+       /*TODO Implement better atomic check for cursor plane */
+       if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+               if (!enable || !new_plane_crtc ||
+                       drm_atomic_plane_disabling(plane->state, new_plane_state))
+                       return 0;
+
+               new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+               if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+                       (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+                       DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+                                                        new_plane_state->crtc_w, new_plane_state->crtc_h);
+                       return -EINVAL;
+               }
+
+               if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
+                       new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
+                       DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
+                                                        new_plane_state->crtc_x, new_plane_state->crtc_y);
+                       return -EINVAL;
+               }
+
                return 0;
+       }
 
        needs_reset = should_reset_plane(state, plane, old_plane_state,
                                         new_plane_state);
@@ -8034,6 +8093,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                        struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
                        struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
                        uint64_t tiling_flags;
+                       bool tmz_surface = false;
 
                        new_plane_crtc = new_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -8063,6 +8123,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                                                new_dm_plane_state->dc_state->gamma_correction;
                                bundle->surface_updates[num_plane].in_transfer_func =
                                                new_dm_plane_state->dc_state->in_transfer_func;
+                               bundle->surface_updates[num_plane].gamut_remap_matrix =
+                                               &new_dm_plane_state->dc_state->gamut_remap_matrix;
                                bundle->stream_update.gamut_remap =
                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
                                bundle->stream_update.output_csc_transform =
@@ -8079,14 +8141,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                        bundle->surface_updates[num_plane].scaling_info = scaling_info;
 
                        if (amdgpu_fb) {
-                               ret = get_fb_info(amdgpu_fb, &tiling_flags);
+                               ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
                                if (ret)
                                        goto cleanup;
 
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address);
+                                       &flip_addr->address, tmz_surface,
+                                       false);
                                if (ret)
                                        goto cleanup;
 
@@ -8586,8 +8649,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
                return;
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                        dpcd_data, sizeof(dpcd_data))) {
-               link->psr_feature_enabled = dpcd_data[0] ? true:false;
-               DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+               link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+               if (dpcd_data[0] == 0) {
+                       link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+                       link->psr_settings.psr_feature_enabled = false;
+               } else {
+                       link->psr_settings.psr_version = DC_PSR_VERSION_1;
+                       link->psr_settings.psr_feature_enabled = true;
+               }
+
+               DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
        }
 }
 
@@ -8602,16 +8674,14 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
        struct dc_link *link = NULL;
        struct psr_config psr_config = {0};
        struct psr_context psr_context = {0};
-       struct dc *dc = NULL;
        bool ret = false;
 
        if (stream == NULL)
                return false;
 
        link = stream->link;
-       dc = link->ctx->dc;
 
-       psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+       psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
 
        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
@@ -8623,7 +8693,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
 
        }
-       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
+       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
 
        return ret;
 }
index 2233d293a707a9e79d9f6d4da4433eb2ad217a94..4dfb6b55bb2ede7f9408363d30b9dc4015a2ea56 100644 (file)
@@ -239,7 +239,8 @@ static int __set_output_tf(struct dc_transfer_func *func,
                 * instead to simulate this.
                 */
                gamma->type = GAMMA_CUSTOM;
-               res = mod_color_calculate_degamma_params(func, gamma, true);
+               res = mod_color_calculate_degamma_params(NULL, func,
+                                                       gamma, true);
        } else {
                /*
                 * Assume sRGB. The actual mapping will depend on whether the
@@ -271,7 +272,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
 
        __drm_lut_to_dc_gamma(lut, gamma, false);
 
-       res = mod_color_calculate_degamma_params(func, gamma, true);
+       res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
        dc_gamma_release(&gamma);
 
        return res ? 0 : -ENOMEM;
@@ -419,9 +420,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                                      struct dc_plane_state *dc_plane_state)
 {
        const struct drm_color_lut *degamma_lut;
+       enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
        uint32_t degamma_size;
        int r;
 
+       /* Get the correct base transfer function for implicit degamma. */
+       switch (dc_plane_state->format) {
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+               /* DC doesn't have a transfer function for BT601 specifically. */
+               tf = TRANSFER_FUNCTION_BT709;
+               break;
+       default:
+               break;
+       }
+
        if (crtc->cm_has_degamma) {
                degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
                                                 &degamma_size);
@@ -455,8 +468,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                 * map these to the atomic one instead.
                 */
                if (crtc->cm_is_degamma_srgb)
-                       dc_plane_state->in_transfer_func->tf =
-                               TRANSFER_FUNCTION_SRGB;
+                       dc_plane_state->in_transfer_func->tf = tf;
                else
                        dc_plane_state->in_transfer_func->tf =
                                TRANSFER_FUNCTION_LINEAR;
@@ -471,7 +483,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                 * in linear space. Assume that the input is sRGB.
                 */
                dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
-               dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+               dc_plane_state->in_transfer_func->tf = tf;
+
+               if (tf != TRANSFER_FUNCTION_SRGB &&
+                   !mod_color_calculate_degamma_params(NULL,
+                           dc_plane_state->in_transfer_func, NULL, false))
+                       return -ENOMEM;
        } else {
                /* ...Otherwise we can just bypass the DGM block. */
                dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
index 0461fecd68db336d0e16f1f5ab55bac24f4caf8d..076af267b4882c876d57f68ee8212858344af635 100644 (file)
@@ -32,7 +32,7 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_debugfs.h"
 #include "dm_helpers.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
 
 struct dmub_debugfs_trace_header {
        uint32_t entry_count;
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
        return 0;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * NOTE* Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since its rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ *             or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       bool hdcp_cap, hdcp2_cap;
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
+
+       hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+       hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+
+       if (hdcp_cap)
+               seq_printf(m, "%s ", "HDCP1.4");
+       if (hdcp2_cap)
+               seq_printf(m, "%s ", "HDCP2.2");
+
+       if (!hdcp_cap && !hdcp2_cap)
+               seq_printf(m, "%s ", "None");
+
+       seq_puts(m, "\n");
+
+       return 0;
+}
+#endif
 /* function description
  *
  * generic SDP message access for testing
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
 DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
 
 static const struct file_operations dp_link_settings_debugfs_fops = {
        .owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
                {"test_pattern", &dp_phy_test_pattern_fops},
                {"output_bpc", &output_bpc_fops},
                {"vrr_range", &vrr_range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+               {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+#endif
                {"sdp_message", &sdp_message_fops},
                {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
                {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
                {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
 };
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+       char *name;
+       const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+               {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+};
+#endif
 /*
  * Force YUV420 output if available from the given mode
  */
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
        connector->debugfs_dpcd_address = 0;
        connector->debugfs_dpcd_size = 0;
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+               for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+                       debugfs_create_file(hdmi_debugfs_entries[i].name,
+                                           0644, dir, connector,
+                                           hdmi_debugfs_entries[i].fops);
+               }
+       }
+#endif
 }
 
 /*
@@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct dc *dc = adev->dm.dc;
-       unsigned int backlight = dc_get_current_backlight_pwm(dc);
+       struct amdgpu_display_manager *dm = &adev->dm;
+
+       unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
 
        seq_printf(m, "0x%x\n", backlight);
        return 0;
@@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct dc *dc = adev->dm.dc;
-       unsigned int backlight = dc_get_target_backlight_pwm(dc);
+       struct amdgpu_display_manager *dm = &adev->dm;
+
+       unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
 
        seq_printf(m, "0x%x\n", backlight);
        return 0;
index 78e1c11d4ae544d59516dc4e17a8732c14d38bf9..dcf84a61de37f3b1254da646ec3688c00de3ecbf 100644 (file)
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        struct mod_hdcp_display *display = &hdcp_work[link_index].display;
        struct mod_hdcp_link *link = &hdcp_work[link_index].link;
 
-       memset(display, 0, sizeof(*display));
-       memset(link, 0, sizeof(*link));
-
-       display->index = aconnector->base.index;
-
        if (config->dpms_off) {
                hdcp_remove_display(hdcp_work, link_index, aconnector);
                return;
        }
+
+       memset(display, 0, sizeof(*display));
+       memset(link, 0, sizeof(*link));
+
+       display->index = aconnector->base.index;
        display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
        if (aconnector->dc_sink != NULL)
index c20fb08c450be4489f020a1030d20c9fa3791cae..b086d5c906e0f21d9d190cde994088f11a2e97a2 100644 (file)
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
        struct amdgpu_dm_connector *aconnector = link->priv;
 
        if (!aconnector) {
-               DRM_ERROR("Failed to find connector for link!");
+               DC_LOG_DC("Failed to find connector for link!\n");
                return false;
        }
 
@@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
                struct dc_sink *sink)
 {
        struct amdgpu_dm_connector *aconnector = link->priv;
+       struct drm_connector *connector = &aconnector->base;
        struct i2c_adapter *ddc;
        int retry = 3;
        enum dc_edid_status edid_status;
@@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
 
                edid = drm_get_edid(&aconnector->base, ddc);
 
+               /* DP Compliance Test 4.2.2.6 */
+               if (link->aux_mode && connector->edid_corrupt)
+                       drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
+
+               if (!edid && connector->edid_corrupt) {
+                       connector->edid_corrupt = false;
+                       return EDID_BAD_CHECKSUM;
+               }
+
                if (!edid)
                        return EDID_NO_RESPONSE;
 
@@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
                DRM_ERROR("EDID err: %d, on connector: %s",
                                edid_status,
                                aconnector->base.name);
-       if (link->aux_mode) {
-               union test_request test_request = { {0} };
-               union test_response test_response = { {0} };
-
-               dm_helpers_dp_read_dpcd(ctx,
-                                       link,
-                                       DP_TEST_REQUEST,
-                                       &test_request.raw,
-                                       sizeof(union test_request));
-
-               if (!test_request.bits.EDID_READ)
-                       return edid_status;
 
-               test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
-               dm_helpers_dp_write_dpcd(ctx,
-                                       link,
-                                       DP_TEST_EDID_CHECKSUM,
-                                       &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
-                                       1);
-
-               dm_helpers_dp_write_dpcd(ctx,
-                                       link,
-                                       DP_TEST_RESPONSE,
-                                       &test_response.raw,
-                                       sizeof(test_response));
-
-       }
+       /* DP Compliance Test 4.2.2.3 */
+       if (link->aux_mode)
+               drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
 
        return edid_status;
 }
index fabbe78d5aefeefc6f933aef4deadc4f486b1157..ae0a7ef1d595a6e226754a007e6c1f163cad254b 100644 (file)
 #include "amdgpu_dm_debugfs.h"
 #endif
 
-
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dc/dcn20/dcn20_resource.h"
 #endif
 
-/* #define TRACE_DPCD */
-
-#ifdef TRACE_DPCD
-#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
-static inline char *side_band_msg_type_to_str(uint32_t address)
-{
-       static char str[10] = {0};
-
-       if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
-               strcpy(str, "DOWN_REQ");
-       else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
-               strcpy(str, "UP_REP");
-       else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
-               strcpy(str, "DOWN_REP");
-       else
-               strcpy(str, "UP_REQ");
-
-       return str;
-}
-
-static void log_dpcd(uint8_t type,
-                    uint32_t address,
-                    uint8_t *data,
-                    uint32_t size,
-                    bool res)
-{
-       DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
-                       (type == DP_AUX_NATIVE_READ) ||
-                       (type == DP_AUX_I2C_READ) ?
-                                       "Read" : "Write",
-                       address,
-                       SIDE_BAND_MSG(address) ?
-                                       side_band_msg_type_to_str(address) : "Nop",
-                       res ? "OK" : "Fail");
-
-       if (res) {
-               print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
-       }
-}
-#endif
-
 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
 {
@@ -136,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 static void
 dm_dp_mst_connector_destroy(struct drm_connector *connector)
 {
-       struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
-       struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+       struct amdgpu_dm_connector *aconnector =
+               to_amdgpu_dm_connector(connector);
+       struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
 
-       kfree(amdgpu_dm_connector->edid);
-       amdgpu_dm_connector->edid = NULL;
+       if (aconnector->dc_sink) {
+               dc_link_remove_remote_sink(aconnector->dc_link,
+                                          aconnector->dc_sink);
+               dc_sink_release(aconnector->dc_sink);
+       }
+
+       kfree(aconnector->edid);
 
        drm_encoder_cleanup(&amdgpu_encoder->base);
        kfree(amdgpu_encoder);
        drm_connector_cleanup(connector);
-       drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
-       kfree(amdgpu_dm_connector);
+       drm_dp_mst_put_port_malloc(aconnector->port);
+       kfree(aconnector);
 }
 
 static int
@@ -156,16 +119,16 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
                to_amdgpu_dm_connector(connector);
        int r;
 
-       amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
-       r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
-       if (r)
+       r = drm_dp_mst_connector_late_register(connector,
+                                              amdgpu_dm_connector->port);
+       if (r < 0)
                return r;
 
 #if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
 #endif
 
-       return r;
+       return 0;
 }
 
 static void
@@ -435,46 +398,22 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
-       DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
-                aconnector, connector->base.id, aconnector->mst_port);
-
        drm_dp_mst_get_port_malloc(port);
 
-       DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
        return connector;
 }
 
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_connector *connector)
-{
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
-       DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
-                aconnector, connector->base.id, aconnector->mst_port);
-
-       if (aconnector->dc_sink) {
-               amdgpu_dm_update_freesync_caps(connector, NULL);
-               dc_link_remove_remote_sink(aconnector->dc_link,
-                                          aconnector->dc_sink);
-               dc_sink_release(aconnector->dc_sink);
-               aconnector->dc_sink = NULL;
-               aconnector->dc_link->cur_link_settings.lane_count = 0;
-       }
-
-       drm_connector_unregister(connector);
-       drm_connector_put(connector);
-}
-
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
-       .destroy_connector = dm_dp_destroy_mst_connector,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
-                                      struct amdgpu_dm_connector *aconnector)
+                                      struct amdgpu_dm_connector *aconnector,
+                                      int link_index)
 {
-       aconnector->dm_dp_aux.aux.name = "dmdc";
+       aconnector->dm_dp_aux.aux.name =
+               kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
+                         link_index);
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
 
index d6813ce67bbd36844b5af0ac235b7190e8164a44..d2c56579a2cc412a3074ba0a089f29b891492c77 100644 (file)
@@ -32,7 +32,8 @@ struct amdgpu_dm_connector;
 int dm_mst_get_pbn_divider(struct dc_link *link);
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
-                                      struct amdgpu_dm_connector *aconnector);
+                                      struct amdgpu_dm_connector *aconnector,
+                                      int link_index);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
index 7ad0cad0f4efedd9fcec850d18b13c596872f21e..01b99e0d788e38b0121a4e44db6887c79d0c90f9 100644 (file)
@@ -24,8 +24,7 @@
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-BASICS = conversion.o fixpt31_32.o \
-       log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
 
 AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
 
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
deleted file mode 100644 (file)
index 26583f3..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
-
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
-       int i;
-
-       if (hex_data)
-               for (i = 0; i < hex_data_count; i++)
-                       DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
-
index 8edc2506d49e7b1838287a69875ab62429e6774b..bed91572f82a6a3bd44ab338c8ad76c2d294147e 100644 (file)
@@ -113,13 +113,19 @@ static void encoder_control_dmcub(
                struct dc_dmub_srv *dmcub,
                struct dig_encoder_stream_setup_parameters_v1_5 *dig)
 {
-       struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+       union dmub_rb_cmd cmd;
 
-       encoder_control.header.type = DMUB_CMD__VBIOS;
-       encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
-       encoder_control.encoder_control.dig.stream_param = *dig;
+       memset(&cmd, 0, sizeof(cmd));
 
-       dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+       cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
+       cmd.digx_encoder_control.header.sub_type =
+               DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+       cmd.digx_encoder_control.header.payload_bytes =
+               sizeof(cmd.digx_encoder_control) -
+               sizeof(cmd.digx_encoder_control.header);
+       cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -238,14 +244,19 @@ static void transmitter_control_dmcub(
                struct dc_dmub_srv *dmcub,
                struct dig_transmitter_control_parameters_v1_6 *dig)
 {
-       struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
 
-       transmitter_control.header.type = DMUB_CMD__VBIOS;
-       transmitter_control.header.sub_type =
+       cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
+       cmd.dig1_transmitter_control.header.sub_type =
                DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
-       transmitter_control.transmitter_control.dig = *dig;
+       cmd.dig1_transmitter_control.header.payload_bytes =
+               sizeof(cmd.dig1_transmitter_control) -
+               sizeof(cmd.dig1_transmitter_control.header);
+       cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
 
-       dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -339,13 +350,18 @@ static void set_pixel_clock_dmcub(
                struct dc_dmub_srv *dmcub,
                struct set_pixel_clock_parameter_v1_7 *clk)
 {
-       struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+       union dmub_rb_cmd cmd;
 
-       pixel_clock.header.type = DMUB_CMD__VBIOS;
-       pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
-       pixel_clock.pixel_clock.clk = *clk;
+       memset(&cmd, 0, sizeof(cmd));
 
-       dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+       cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS;
+       cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+       cmd.set_pixel_clock.header.payload_bytes =
+               sizeof(cmd.set_pixel_clock) -
+               sizeof(cmd.set_pixel_clock.header);
+       cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -705,13 +721,19 @@ static void enable_disp_power_gating_dmcub(
        struct dc_dmub_srv *dmcub,
        struct enable_disp_power_gating_parameters_v2_1 *pwr)
 {
-       struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
 
-       power_gating.header.type = DMUB_CMD__VBIOS;
-       power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
-       power_gating.power_gating.pwr = *pwr;
+       cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS;
+       cmd.enable_disp_power_gating.header.sub_type =
+               DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+       cmd.enable_disp_power_gating.header.payload_bytes =
+               sizeof(cmd.enable_disp_power_gating) -
+               sizeof(cmd.enable_disp_power_gating.header);
+       cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
 
-       dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
index 8ec2dfe45d4009877756d7d07047cbd267cd59fb..a5c2114e4292f375a0192515d138ef879e294d5d 100644 (file)
@@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
                dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
 
        if (edp_link) {
-               clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+               clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
                dc_link_set_psr_allow_active(edp_link, false, false);
        }
 
index 26db1c5d4e4d2619dea1ba09c4d8ad2f92678cd9..b210f8e9d592d1168685725b9947e1777ada7958 100644 (file)
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        int dprefclk_wdivider;
        int dprefclk_src_sel;
-       int dp_ref_clk_khz = 600000;
+       int dp_ref_clk_khz;
        int target_div;
 
        /* ASSERT DP Reference Clock source is from DFS*/
index 97b7f32294fd85943b916505e76f5f8adb4bb0f2..c320b7af7d34ca77683006401c274ac9c1dbd9b9 100644 (file)
@@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
                        VBIOSSMC_MSG_SetDispclkFreq,
                        requested_dispclk_khz / 1000);
 
-       /* Actual dispclk set is returned in the parameter register */
-       actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
-
        if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
                if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
                        if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
index 8489f1e56892c9b5dce662812887f0dba40e49b8..45cfb7c45566aff4e017a166252503a1e1089a91 100644 (file)
@@ -66,6 +66,8 @@
 
 #include "dce/dce_i2c.h"
 
+#include "dmub/dmub_srv.h"
+
 #define CTX \
        dc->ctx
 
@@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
 
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-               if (pipe->stream == stream)
+               if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }
        /* Stream not found */
@@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;
 
+       param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+       param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
@@ -834,11 +839,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
-       int count = 0;
-       struct pipe_ctx *pipe;
        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
-               pipe = &context->res_ctx.pipe_ctx[i];
+               int count = 0;
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                if (!pipe->plane_state)
                        continue;
@@ -2205,7 +2209,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
                                if (should_program_abm) {
                                        if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
-                                               pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+                                               dc->hwss.set_abm_immediate_disable(pipe_ctx);
                                        } else {
                                                pipe_ctx->stream_res.abm->funcs->set_abm_level(
                                                        pipe_ctx->stream_res.abm, stream->abm_level);
@@ -2641,33 +2645,12 @@ void dc_set_power_state(
 
 void dc_resume(struct dc *dc)
 {
-
        uint32_t i;
 
        for (i = 0; i < dc->link_count; i++)
                core_link_resume(dc->links[i]);
 }
 
-unsigned int dc_get_current_backlight_pwm(struct dc *dc)
-{
-       struct abm *abm = dc->res_pool->abm;
-
-       if (abm)
-               return abm->funcs->get_current_backlight(abm);
-
-       return 0;
-}
-
-unsigned int dc_get_target_backlight_pwm(struct dc *dc)
-{
-       struct abm *abm = dc->res_pool->abm;
-
-       if (abm)
-               return abm->funcs->get_target_backlight(abm);
-
-       return 0;
-}
-
 bool dc_is_dmcu_initialized(struct dc *dc)
 {
        struct dmcu *dmcu = dc->res_pool->dmcu;
index 67cfff1586e9fe6e91cd880e4d6a5942cf2ed8c1..c08de6823db4b83590603f8e0cb6796c02a7d2c5 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 
 #include "dm_services.h"
-#include "atom.h"
+#include "atomfirmware.h"
 #include "dm_helpers.h"
 #include "dc.h"
 #include "grph_object_id.h"
 #include "dmcu.h"
 #include "hw/clk_mgr.h"
 #include "dce/dmub_psr.h"
+#include "dmub/dmub_srv.h"
+#include "inc/hw/panel_cntl.h"
 
 #define DC_LOGGER_INIT(logger)
 
-
 #define LINK_INFO(...) \
        DC_LOG_HW_HOTPLUG(  \
                __VA_ARGS__)
 enum {
        PEAK_FACTOR_X1000 = 1006,
        /*
-       * Some receivers fail to train on first try and are good
-       * on subsequent tries. 2 retries should be plenty. If we
-       * don't have a successful training then we don't expect to
-       * ever get one.
-       */
+        * Some receivers fail to train on first try and are good
+        * on subsequent tries. 2 retries should be plenty. If we
+        * don't have a successful training then we don't expect to
+        * ever get one.
+        */
        LINK_TRAINING_MAX_VERIFY_RETRY = 2
 };
 
@@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link)
 {
        int i;
 
-       if (link->hpd_gpio != NULL) {
+       if (link->hpd_gpio) {
                dal_gpio_destroy_irq(&link->hpd_gpio);
                link->hpd_gpio = NULL;
        }
@@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link)
        if (link->ddc)
                dal_ddc_service_destroy(&link->ddc);
 
-       if(link->link_enc)
+       if (link->panel_cntl)
+               link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+       if (link->link_enc)
                link->link_enc->funcs->destroy(&link->link_enc);
 
        if (link->local_sink)
@@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link)
 }
 
 struct gpio *get_hpd_gpio(struct dc_bios *dcb,
-               struct graphics_object_id link_id,
-               struct gpio_service *gpio_service)
+                         struct graphics_object_id link_id,
+                         struct gpio_service *gpio_service)
 {
        enum bp_result bp_result;
        struct graphics_object_hpd_info hpd_info;
@@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
                return NULL;
        }
 
-       return dal_gpio_service_create_irq(
-               gpio_service,
-               pin_info.offset,
-               pin_info.mask);
+       return dal_gpio_service_create_irq(gpio_service,
+                                          pin_info.offset,
+                                          pin_info.mask);
 }
 
 /*
@@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
  *  @return
  *     true on success, false otherwise
  */
-static bool program_hpd_filter(
-       const struct dc_link *link)
+static bool program_hpd_filter(const struct dc_link *link)
 {
        bool result = false;
-
        struct gpio *hpd;
-
        int delay_on_connect_in_ms = 0;
        int delay_on_disconnect_in_ms = 0;
 
@@ -159,10 +159,10 @@ static bool program_hpd_filter(
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                /* Program hpd filter to allow DP signal to settle */
                /* 500: not able to detect MST <-> SST switch as HPD is low for
-                *      only 100ms on DELL U2413
-                * 0:   some passive dongle still show aux mode instead of i2c
-                * 20-50:not enough to hide bouncing HPD with passive dongle.
-                *      also see intermittent i2c read issues.
+                * only 100ms on DELL U2413
+                * 0: some passive dongle still show aux mode instead of i2c
+                * 20-50: not enough to hide bouncing HPD with passive dongle.
+                * also see intermittent i2c read issues.
                 */
                delay_on_connect_in_ms = 80;
                delay_on_disconnect_in_ms = 0;
@@ -175,7 +175,8 @@ static bool program_hpd_filter(
        }
 
        /* Obtain HPD handle */
-       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                          link->ctx->gpio_service);
 
        if (!hpd)
                return result;
@@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
        }
 
        /* todo: may need to lock gpio access */
-       hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-       if (hpd_pin == NULL)
+       hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                              link->ctx->gpio_service);
+       if (!hpd_pin)
                goto hpd_gpio_failure;
 
        dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
@@ -248,8 +250,7 @@ hpd_gpio_failure:
        return false;
 }
 
-static enum ddc_transaction_type get_ddc_transaction_type(
-               enum signal_type sink_signal)
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
 {
        enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
 
@@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                /* MST does not use I2COverAux, but there is the
                 * SPECIAL use case for "immediate dwnstrm device
-                * access" (EPR#370830). */
+                * access" (EPR#370830).
+                */
                transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                break;
 
@@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
        return transaction_type;
 }
 
-static enum signal_type get_basic_signal_type(
-       struct graphics_object_id encoder,
-       struct graphics_object_id downstream)
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+                                             struct graphics_object_id downstream)
 {
        if (downstream.type == OBJECT_TYPE_CONNECTOR) {
                switch (downstream.id) {
@@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
        /* Open GPIO and set it to I2C mode */
        /* Note: this GpioMode_Input will be converted
         * to GpioConfigType_I2cAuxDualMode in GPIO component,
-        * which indicates we need additional delay */
+        * which indicates we need additional delay
+        */
 
-       if (GPIO_RESULT_OK != dal_ddc_open(
-               ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+       if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+                        GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
                dal_ddc_close(ddc);
 
                return present;
@@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
  * @brief
  * Detect output sink type
  */
-static enum signal_type link_detect_sink(
-       struct dc_link *link,
-       enum dc_detect_reason reason)
+static enum signal_type link_detect_sink(struct dc_link *link,
+                                        enum dc_detect_reason reason)
 {
-       enum signal_type result = get_basic_signal_type(
-               link->link_enc->id, link->link_id);
+       enum signal_type result = get_basic_signal_type(link->link_enc->id,
+                                                       link->link_id);
 
        /* Internal digital encoder will detect only dongles
-        * that require digital signal */
+        * that require digital signal
+        */
 
        /* Detection mechanism is different
         * for different native connectors.
         * LVDS connector supports only LVDS signal;
         * PCIE is a bus slot, the actual connector needs to be detected first;
         * eDP connector supports only eDP signal;
-        * HDMI should check straps for audio */
+        * HDMI should check straps for audio
+        */
 
        /* PCIE detects the actual connector on add-on board */
-
        if (link->link_id.id == CONNECTOR_ID_PCIE) {
                /* ZAZTODO implement PCIE add-on card detection */
        }
@@ -432,8 +434,10 @@ static enum signal_type link_detect_sink(
        switch (link->link_id.id) {
        case CONNECTOR_ID_HDMI_TYPE_A: {
                /* check audio support:
-                * if native HDMI is not supported, switch to DVI */
-               struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+                * if native HDMI is not supported, switch to DVI
+                */
+               struct audio_support *aud_support =
+                                       &link->dc->res_pool->audio_support;
 
                if (!aud_support->hdmi_audio_native)
                        if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
@@ -461,16 +465,15 @@ static enum signal_type link_detect_sink(
        return result;
 }
 
-static enum signal_type decide_signal_from_strap_and_dongle_type(
-               enum display_dongle_type dongle_type,
-               struct audio_support *audio_support)
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+                                                                struct audio_support *audio_support)
 {
        enum signal_type signal = SIGNAL_TYPE_NONE;
 
        switch (dongle_type) {
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                if (audio_support->hdmi_audio_on_dongle)
-                       signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+                       signal = SIGNAL_TYPE_HDMI_TYPE_A;
                else
                        signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
@@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(
        return signal;
 }
 
-static enum signal_type dp_passive_dongle_detection(
-               struct ddc_service *ddc,
-               struct display_sink_capability *sink_cap,
-               struct audio_support *audio_support)
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+                                                   struct display_sink_capability *sink_cap,
+                                                   struct audio_support *audio_support)
 {
-       dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
-                                               ddc, sink_cap);
-       return decide_signal_from_strap_and_dongle_type(
-                       sink_cap->dongle_type,
-                       audio_support);
+       dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+       return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+                                                       audio_support);
 }
 
 static void link_disconnect_sink(struct dc_link *link)
@@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
        link->local_sink = prev_sink;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+       bool ret = false;
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_DISPLAY_PORT:
+       case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+               break;
+       case SIGNAL_TYPE_DVI_SINGLE_LINK:
+       case SIGNAL_TYPE_DVI_DUAL_LINK:
+       case SIGNAL_TYPE_HDMI_TYPE_A:
+       /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable,
+        * we can poll for bksv but some displays have an issue with this. Since its so rare
+        * for a display to not be 1.4 capable, this assumtion is ok
+        */
+               ret = true;
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link)
+{
+       bool ret = false;
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_DISPLAY_PORT:
+       case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+                               link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+                               (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+               break;
+       case SIGNAL_TYPE_DVI_SINGLE_LINK:
+       case SIGNAL_TYPE_DVI_DUAL_LINK:
+       case SIGNAL_TYPE_HDMI_TYPE_A:
+               ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+       struct hdcp_protection_message msg22;
+       struct hdcp_protection_message msg14;
+
+       memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+       memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+       memset(link->hdcp_caps.rx_caps.raw, 0,
+               sizeof(link->hdcp_caps.rx_caps.raw));
+
+       if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                       link->ddc->transaction_type ==
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+                       link->connector_signal == SIGNAL_TYPE_EDP) {
+               msg22.data = link->hdcp_caps.rx_caps.raw;
+               msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+               msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+       } else {
+               msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+               msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+               msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+       }
+       msg22.version = HDCP_VERSION_22;
+       msg22.link = HDCP_LINK_PRIMARY;
+       msg22.max_retries = 5;
+       dc_process_hdcp_msg(signal, link, &msg22);
+
+       if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+               enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+               msg14.data = &link->hdcp_caps.bcaps.raw;
+               msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+               msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+               msg14.version = HDCP_VERSION_14;
+               msg14.link = HDCP_LINK_PRIMARY;
+               msg14.max_retries = 5;
+
+               status = dc_process_hdcp_msg(signal, link, &msg14);
+       }
+
+}
+#endif
 
 static void read_current_link_settings_on_detect(struct dc_link *link)
 {
@@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
 
        // Read DPCD 00101h to find out the number of lanes currently set
        for (i = 0; i < read_dpcd_retry_cnt; i++) {
-               status = core_link_read_dpcd(
-                               link,
-                               DP_LANE_COUNT_SET,
-                               &lane_count_set.raw,
-                               sizeof(lane_count_set));
+               status = core_link_read_dpcd(link,
+                                            DP_LANE_COUNT_SET,
+                                            &lane_count_set.raw,
+                                            sizeof(lane_count_set));
                /* First DPCD read after VDD ON can fail if the particular board
                 * does not have HPD pin wired correctly. So if DPCD read fails,
                 * which it should never happen, retry a few times. Target worst
                 * case scenario of 80 ms.
                 */
                if (status == DC_OK) {
-                       link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+                       link->cur_link_settings.lane_count =
+                                       lane_count_set.bits.LANE_COUNT_SET;
                        break;
                }
 
@@ -552,7 +643,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
 
        // Read DPCD 00100h to find if standard link rates are set
        core_link_read_dpcd(link, DP_LINK_BW_SET,
-                       &link_bw_set, sizeof(link_bw_set));
+                           &link_bw_set, sizeof(link_bw_set));
 
        if (link_bw_set == 0) {
                if (link->connector_signal == SIGNAL_TYPE_EDP) {
@@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
                         * Read DPCD 00115h to find the edp link rate set used
                         */
                        core_link_read_dpcd(link, DP_LINK_RATE_SET,
-                                       &link_rate_set, sizeof(link_rate_set));
+                                           &link_rate_set, sizeof(link_rate_set));
 
                        // edp_supported_link_rates_count = 0 for DP
                        if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
                                link->cur_link_settings.link_rate =
-                                               link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+                                       link->dpcd_caps.edp_supported_link_rates[link_rate_set];
                                link->cur_link_settings.link_rate_set = link_rate_set;
                                link->cur_link_settings.use_link_rate_set = true;
                        }
@@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
        }
        // Read DPCD 00003h to find the max down spread.
        core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
-                       &max_down_spread.raw, sizeof(max_down_spread));
+                           &max_down_spread.raw, sizeof(max_down_spread));
        link->cur_link_settings.link_spread =
                max_down_spread.bits.MAX_DOWN_SPREAD ?
                LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
@@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link,
                        dal_ddc_service_set_transaction_type(link->ddc,
                                                             sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       /* In case of fallback to SST when topology discovery below fails
+                        * HDCP caps will be querried again later by the upper layer (caller
+                        * of this function). */
+                       query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
+#endif
                        /*
                         * This call will initiate MST topology discovery. Which
                         * will detect MST ports and add new DRM connector DRM
@@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
        if (new_edid->length == 0)
                return false;
 
-       return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+       return (memcmp(old_edid->raw_edid,
+                      new_edid->raw_edid, new_edid->length) == 0);
 }
 
-static bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
 {
-
        /**
         * something is terribly wrong if time out is > 200ms. (5Hz)
         * 500 microseconds * 400 tries us 200 ms
@@ -703,7 +800,7 @@ static bool wait_for_alt_mode(struct dc_link *link)
 
        DC_LOGGER_INIT(link->ctx->logger);
 
-       if (link->link_enc->funcs->is_in_alt_mode == NULL)
+       if (!link->link_enc->funcs->is_in_alt_mode)
                return true;
 
        is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
@@ -718,21 +815,21 @@ static bool wait_for_alt_mode(struct dc_link *link)
                udelay(sleep_time_in_microseconds);
                /* ask the link if alt mode is enabled, if so return ok */
                if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
-
                        finish_timestamp = dm_get_timestamp(link->ctx);
-                       time_taken_in_ns = dm_get_elapse_time_in_ns(
-                               link->ctx, finish_timestamp, enter_timestamp);
+                       time_taken_in_ns =
+                               dm_get_elapse_time_in_ns(link->ctx,
+                                                        finish_timestamp,
+                                                        enter_timestamp);
                        DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
                                       div_u64(time_taken_in_ns, 1000000));
                        return true;
                }
-
        }
        finish_timestamp = dm_get_timestamp(link->ctx);
        time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
                                                    enter_timestamp);
        DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
-                       div_u64(time_taken_in_ns, 1000000));
+                      div_u64(time_taken_in_ns, 1000000));
        return false;
 }
 
@@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link,
                return false;
 
        if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
-                       link->connector_signal == SIGNAL_TYPE_EDP) &&
-                       link->local_sink) {
-
+            link->connector_signal == SIGNAL_TYPE_EDP) &&
+           link->local_sink) {
                // need to re-write OUI and brightness in resume case
                if (link->connector_signal == SIGNAL_TYPE_EDP) {
                        dpcd_set_source_specific_data(link);
-                       dc_link_set_default_brightness_aux(link); //TODO: use cached
+                       dc_link_set_default_brightness_aux(link);
+                       //TODO: use cached
                }
 
                return true;
        }
 
-       if (false == dc_link_detect_sink(link, &new_connection_type)) {
+       if (!dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
 
        prev_sink = link->local_sink;
-       if (prev_sink != NULL) {
+       if (prev_sink) {
                dc_sink_retain(prev_sink);
                memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
        }
-       link_disconnect_sink(link);
 
+       link_disconnect_sink(link);
        if (new_connection_type != dc_connection_none) {
                link->type = new_connection_type;
                link->link_state_valid = false;
@@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link,
                }
 
                case SIGNAL_TYPE_DISPLAY_PORT: {
-
                        /* wa HPD high coming too early*/
                        if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
-
                                /* if alt mode times out, return false */
-                               if (wait_for_alt_mode(link) == false) {
+                               if (!wait_for_entering_dp_alt_mode(link))
                                        return false;
-                               }
                        }
 
-                       if (!detect_dp(
-                               link,
-                               &sink_caps,
-                               &converter_disable_audio,
-                               aud_support, reason)) {
-                               if (prev_sink != NULL)
+                       if (!detect_dp(link, &sink_caps,
+                                      &converter_disable_audio,
+                                      aud_support, reason)) {
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
                                return false;
                        }
 
                        // Check if dpcp block is the same
-                       if (prev_sink != NULL) {
-                               if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+                       if (prev_sink) {
+                               if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
+                                          sizeof(struct dpcd_caps)))
                                        same_dpcd = false;
                        }
                        /* Active dongle downstream unplug*/
                        if (link->type == dc_connection_active_dongle &&
-                               link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
-                               if (prev_sink != NULL)
+                           link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+                               if (prev_sink)
                                        /* Downstream unplug */
                                        dc_sink_release(prev_sink);
                                return true;
@@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
                        if (link->type == dc_connection_mst_branch) {
                                LINK_INFO("link=%d, mst branch is now Connected\n",
-                                       link->link_index);
+                                         link->link_index);
                                /* Need to setup mst link_cap struct here
                                 * otherwise dc_link_detect() will leave mst link_cap
                                 * empty which leads to allocate_mst_payload() has "0"
@@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
                                 */
                                dp_verify_mst_link_cap(link);
 
-                               if (prev_sink != NULL)
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
                                return false;
                        }
 
                        // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
                        if (reason == DETECT_REASON_BOOT &&
-                                       dc_ctx->dc->config.power_down_display_on_boot == false &&
-                                       link->link_status.link_active == true)
+                           !dc_ctx->dc->config.power_down_display_on_boot &&
+                           link->link_status.link_active)
                                perform_dp_seamless_boot = true;
 
                        if (perform_dp_seamless_boot) {
@@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
                default:
                        DC_ERROR("Invalid connector type! signal:%d\n",
-                               link->connector_signal);
-                       if (prev_sink != NULL)
+                                link->connector_signal);
+                       if (prev_sink)
                                dc_sink_release(prev_sink);
                        return false;
                } /* switch() */
 
                if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
-                       link->dpcd_sink_count = link->dpcd_caps.sink_count.
-                                       bits.SINK_COUNT;
+                       link->dpcd_sink_count =
+                               link->dpcd_caps.sink_count.bits.SINK_COUNT;
                else
                        link->dpcd_sink_count = 1;
 
-               dal_ddc_service_set_transaction_type(
-                                               link->ddc,
-                                               sink_caps.transaction_type);
+               dal_ddc_service_set_transaction_type(link->ddc,
+                                                    sink_caps.transaction_type);
 
-               link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
-                               link->ddc);
+               link->aux_mode =
+                       dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
 
                sink_init_data.link = link;
                sink_init_data.sink_signal = sink_caps.signal;
@@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
                sink = dc_sink_create(&sink_init_data);
                if (!sink) {
                        DC_ERROR("Failed to create sink!\n");
-                       if (prev_sink != NULL)
+                       if (prev_sink)
                                dc_sink_release(prev_sink);
                        return false;
                }
@@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                /* dc_sink_create returns a new reference */
                link->local_sink = sink;
 
-               edid_status = dm_helpers_read_local_edid(
-                               link->ctx,
-                               link,
-                               sink);
+               edid_status = dm_helpers_read_local_edid(link->ctx,
+                                                        link, sink);
 
                switch (edid_status) {
                case EDID_BAD_CHECKSUM:
@@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        break;
                case EDID_NO_RESPONSE:
                        DC_LOG_ERROR("No EDID read.\n");
-
                        /*
                         * Abort detection for non-DP connectors if we have
                         * no EDID
@@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
                         */
                        if (dc_is_hdmi_signal(link->connector_signal) ||
                            dc_is_dvi_signal(link->connector_signal)) {
-                               if (prev_sink != NULL)
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
 
                                return false;
@@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        link->ctx->dc->debug.disable_fec = true;
 
                // Check if edid is the same
-               if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
-                       same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+               if ((prev_sink) &&
+                   (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+                       same_edid = is_same_edid(&prev_sink->dc_edid,
+                                                &sink->dc_edid);
 
                if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
                        link->ctx->dc->debug.hdmi20_disable = true;
 
                if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-                       sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+                   sink_caps.transaction_type ==
+                   DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
                        /*
                         * TODO debug why Dell 2413 doesn't like
                         *  two link trainings
                         */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       query_hdcp_capability(sink->sink_signal, link);
+#endif
 
                        // verify link cap for SST non-seamless boot
                        if (!perform_dp_seamless_boot)
                                dp_verify_link_cap_with_retries(link,
-                                               &link->reported_link_cap,
-                                               LINK_TRAINING_MAX_VERIFY_RETRY);
+                                                               &link->reported_link_cap,
+                                                               LINK_TRAINING_MAX_VERIFY_RETRY);
                } else {
                        // If edid is the same, then discard new sink and revert back to original sink
                        if (same_edid) {
                                link_disconnect_remap(prev_sink, link);
                                sink = prev_sink;
                                prev_sink = NULL;
-
                        }
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       query_hdcp_capability(sink->sink_signal, link);
+#endif
                }
 
                /* HDMI-DVI Dongle */
                if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
-                               !sink->edid_caps.edid_hdmi)
+                   !sink->edid_caps.edid_hdmi)
                        sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 
                /* Connectivity log: detection */
                for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
                        CONN_DATA_DETECT(link,
-                                       &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
-                                       DC_EDID_BLOCK_SIZE,
-                                       "%s: [Block %d] ", sink->edid_caps.display_name, i);
+                                        &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+                                        DC_EDID_BLOCK_SIZE,
+                                        "%s: [Block %d] ", sink->edid_caps.display_name, i);
                }
 
                DC_LOG_DETECTION_EDID_PARSER("%s: "
@@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link,
                                sink->edid_caps.audio_modes[i].sample_rate,
                                sink->edid_caps.audio_modes[i].sample_size);
                }
-
        } else {
                /* From Connected-to-Disconnected. */
                if (link->type == dc_connection_mst_branch) {
                        LINK_INFO("link=%d, mst branch is now Disconnected\n",
-                               link->link_index);
+                                 link->link_index);
 
                        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                        link->mst_stream_alloc_table.stream_count = 0;
-                       memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+                       memset(link->mst_stream_alloc_table.stream_allocations,
+                              0,
+                              sizeof(link->mst_stream_alloc_table.stream_allocations));
                }
 
                link->type = dc_connection_none;
@@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
        }
 
        LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
-               link->link_index, sink,
-               (sink_caps.signal == SIGNAL_TYPE_NONE ?
-                       "Disconnected":"Connected"), prev_sink,
-                       same_dpcd, same_edid);
+                 link->link_index, sink,
+                 (sink_caps.signal ==
+                  SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+                 prev_sink, same_dpcd, same_edid);
 
-       if (prev_sink != NULL)
+       if (prev_sink)
                dc_sink_release(prev_sink);
 
        return true;
-
 }
 
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link)
        return state;
 }
 
-static enum hpd_source_id get_hpd_line(
-               struct dc_link *link)
+static enum hpd_source_id get_hpd_line(struct dc_link *link)
 {
        struct gpio *hpd;
        enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
 
-       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                          link->ctx->gpio_service);
 
        if (hpd) {
                switch (dal_irq_get_source(hpd)) {
@@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
        return channel;
 }
 
-static enum transmitter translate_encoder_to_transmitter(
-       struct graphics_object_id encoder)
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
 {
        switch (encoder.id) {
        case ENCODER_ID_INTERNAL_UNIPHY:
@@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter(
        }
 }
 
-static bool dc_link_construct(
-       struct dc_link *link,
-       const struct link_init_data *init_params)
+static bool dc_link_construct(struct dc_link *link,
+                             const struct link_init_data *init_params)
 {
        uint8_t i;
        struct ddc_service_init_data ddc_service_init_data = { { 0 } };
        struct dc_context *dc_ctx = init_params->ctx;
        struct encoder_init_data enc_init_data = { 0 };
+       struct panel_cntl_init_data panel_cntl_init_data = { 0 };
        struct integrated_info info = {{{ 0 }}};
        struct dc_bios *bios = init_params->dc->ctx->dc_bios;
        const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
        DC_LOGGER_INIT(dc_ctx->logger);
 
        link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
@@ -1278,23 +1375,27 @@ static bool dc_link_construct(
        link->ctx = dc_ctx;
        link->link_index = init_params->link_index;
 
-       memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
-       memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+       memset(&link->preferred_training_settings, 0,
+              sizeof(struct dc_link_training_overrides));
+       memset(&link->preferred_link_setting, 0,
+              sizeof(struct dc_link_settings));
 
-       link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+       link->link_id =
+               bios->funcs->get_connector_id(bios, init_params->connector_index);
 
        if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
                dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
-                        __func__, init_params->connector_index,
-                        link->link_id.type, OBJECT_TYPE_CONNECTOR);
+                                    __func__, init_params->connector_index,
+                                    link->link_id.type, OBJECT_TYPE_CONNECTOR);
                goto create_fail;
        }
 
        if (link->dc->res_pool->funcs->link_init)
                link->dc->res_pool->funcs->link_init(link);
 
-       link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-       if (link->hpd_gpio != NULL) {
+       link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                                     link->ctx->gpio_service);
+       if (link->hpd_gpio) {
                dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
                dal_gpio_unlock_pin(link->hpd_gpio);
                link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
@@ -1314,9 +1415,9 @@ static bool dc_link_construct(
                link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        case CONNECTOR_ID_DISPLAY_PORT:
-               link->connector_signal =        SIGNAL_TYPE_DISPLAY_PORT;
+               link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
 
-               if (link->hpd_gpio != NULL)
+               if (link->hpd_gpio)
                        link->irq_source_hpd_rx =
                                        dal_irq_get_rx_source(link->hpd_gpio);
 
@@ -1324,42 +1425,60 @@ static bool dc_link_construct(
        case CONNECTOR_ID_EDP:
                link->connector_signal = SIGNAL_TYPE_EDP;
 
-               if (link->hpd_gpio != NULL) {
+               if (link->hpd_gpio) {
                        link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
                        link->irq_source_hpd_rx =
                                        dal_irq_get_rx_source(link->hpd_gpio);
                }
+
                break;
        case CONNECTOR_ID_LVDS:
                link->connector_signal = SIGNAL_TYPE_LVDS;
                break;
        default:
-               DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+               DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+                              link->link_id.id);
                goto create_fail;
        }
 
        /* TODO: #DAL3 Implement id to str function.*/
        LINK_INFO("Connector[%d] description:"
-                       "signal %d\n",
-                       init_params->connector_index,
-                       link->connector_signal);
+                 "signal %d\n",
+                 init_params->connector_index,
+                 link->connector_signal);
 
        ddc_service_init_data.ctx = link->ctx;
        ddc_service_init_data.id = link->link_id;
        ddc_service_init_data.link = link;
        link->ddc = dal_ddc_service_create(&ddc_service_init_data);
 
-       if (link->ddc == NULL) {
+       if (!link->ddc) {
                DC_ERROR("Failed to create ddc_service!\n");
                goto ddc_create_fail;
        }
 
        link->ddc_hw_inst =
-               dal_ddc_get_line(
-                       dal_ddc_service_get_ddc_pin(link->ddc));
+               dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+
+
+       if (link->dc->res_pool->funcs->panel_cntl_create &&
+               (link->link_id.id == CONNECTOR_ID_EDP ||
+                       link->link_id.id == CONNECTOR_ID_LVDS)) {
+               panel_cntl_init_data.ctx = dc_ctx;
+               panel_cntl_init_data.inst = 0;
+               link->panel_cntl =
+                       link->dc->res_pool->funcs->panel_cntl_create(
+                                                               &panel_cntl_init_data);
+
+               if (link->panel_cntl == NULL) {
+                       DC_ERROR("Failed to create link panel_cntl!\n");
+                       goto panel_cntl_create_fail;
+               }
+       }
 
        enc_init_data.ctx = dc_ctx;
-       bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+       bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+                             &enc_init_data.encoder);
        enc_init_data.connector = link->link_id;
        enc_init_data.channel = get_ddc_line(link);
        enc_init_data.hpd_source = get_hpd_line(link);
@@ -1367,11 +1486,11 @@ static bool dc_link_construct(
        link->hpd_src = enc_init_data.hpd_source;
 
        enc_init_data.transmitter =
-                       translate_encoder_to_transmitter(enc_init_data.encoder);
-       link->link_enc = link->dc->res_pool->funcs->link_enc_create(
-                                                               &enc_init_data);
+               translate_encoder_to_transmitter(enc_init_data.encoder);
+       link->link_enc =
+               link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
 
-       if (link->link_enc == NULL) {
+       if (!link->link_enc) {
                DC_ERROR("Failed to create link encoder!\n");
                goto link_enc_create_fail;
        }
@@ -1379,8 +1498,9 @@ static bool dc_link_construct(
        link->link_enc_hw_inst = link->link_enc->transmitter;
 
        for (i = 0; i < 4; i++) {
-               if (BP_RESULT_OK !=
-                               bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+               if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+                                            link->link_id, i,
+                                            &link->device_tag) != BP_RESULT_OK) {
                        DC_ERROR("Failed to find device tag!\n");
                        goto device_tag_fail;
                }
@@ -1388,13 +1508,14 @@ static bool dc_link_construct(
                /* Look for device tag that matches connector signal,
                 * CRT for rgb, LCD for other supported signal tyes
                 */
-               if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+               if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+                                                     link->device_tag.dev_id))
                        continue;
-               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
-                       && link->connector_signal != SIGNAL_TYPE_RGB)
+               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+                   link->connector_signal != SIGNAL_TYPE_RGB)
                        continue;
-               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
-                       && link->connector_signal == SIGNAL_TYPE_RGB)
+               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+                   link->connector_signal == SIGNAL_TYPE_RGB)
                        continue;
                break;
        }
@@ -1406,16 +1527,16 @@ static bool dc_link_construct(
        for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
                struct external_display_path *path =
                        &info.ext_disp_conn_info.path[i];
-               if (path->device_connector_id.enum_id == link->link_id.enum_id
-                       && path->device_connector_id.id == link->link_id.id
-                       && path->device_connector_id.type == link->link_id.type) {
 
-                       if (link->device_tag.acpi_device != 0
-                               && path->device_acpi_enum == link->device_tag.acpi_device) {
+               if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+                   path->device_connector_id.id == link->link_id.id &&
+                   path->device_connector_id.type == link->link_id.type) {
+                       if (link->device_tag.acpi_device != 0 &&
+                           path->device_acpi_enum == link->device_tag.acpi_device) {
                                link->ddi_channel_mapping = path->channel_mapping;
                                link->chip_caps = path->caps;
                        } else if (path->device_tag ==
-                                       link->device_tag.dev_id.raw_device_tag) {
+                                  link->device_tag.dev_id.raw_device_tag) {
                                link->ddi_channel_mapping = path->channel_mapping;
                                link->chip_caps = path->caps;
                        }
@@ -1431,15 +1552,20 @@ static bool dc_link_construct(
         */
        program_hpd_filter(link);
 
+       link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
        return true;
 device_tag_fail:
        link->link_enc->funcs->destroy(&link->link_enc);
 link_enc_create_fail:
+       if (link->panel_cntl != NULL)
+               link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
        dal_ddc_service_destroy(&link->ddc);
 ddc_create_fail:
 create_fail:
 
-       if (link->hpd_gpio != NULL) {
+       if (link->hpd_gpio) {
                dal_gpio_destroy_irq(&link->hpd_gpio);
                link->hpd_gpio = NULL;
        }
@@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing(
        return DC_OK;
 }
 
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+       int i;
+       struct dc *dc = link->ctx->dc;
+       struct abm *abm = NULL;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+               struct dc_stream_state *stream = pipe_ctx.stream;
+
+               if (stream && stream->link == link) {
+                       abm = pipe_ctx.stream_res.abm;
+                       break;
+               }
+       }
+       return abm;
+}
+
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-       struct abm *abm = link->ctx->dc->res_pool->abm;
+
+       struct abm *abm = get_abm_from_stream_res(link);
 
        if (abm == NULL || abm->funcs->get_current_backlight == NULL)
                return DC_ERROR_UNEXPECTED;
@@ -2349,71 +2494,63 @@ int dc_link_get_backlight_level(const struct dc_link *link)
        return (int) abm->funcs->get_current_backlight(abm);
 }
 
-bool dc_link_set_backlight_level(const struct dc_link *link,
-               uint32_t backlight_pwm_u16_16,
-               uint32_t frame_ramp)
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
 {
-       struct dc  *dc = link->ctx->dc;
-       struct abm *abm = dc->res_pool->abm;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-       unsigned int controller_id = 0;
-       bool use_smooth_brightness = true;
-       int i;
-       DC_LOGGER_INIT(link->ctx->logger);
+       struct abm *abm = get_abm_from_stream_res(link);
 
-       if ((dmcu == NULL) ||
-               (abm == NULL) ||
-               (abm->funcs->set_backlight_level_pwm == NULL))
-               return false;
+       if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+               return DC_ERROR_UNEXPECTED;
 
-       use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+       return (int) abm->funcs->get_target_backlight(abm);
+}
 
-       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-                       backlight_pwm_u16_16, backlight_pwm_u16_16);
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+       int i;
+       struct dc *dc = link->ctx->dc;
+       struct pipe_ctx *pipe_ctx = NULL;
 
-       if (dc_is_embedded_signal(link->connector_signal)) {
-               for (i = 0; i < MAX_PIPES; i++) {
-                       if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
-                               if (dc->current_state->res_ctx.
-                                               pipe_ctx[i].stream->link
-                                               == link) {
-                                       /* DMCU -1 for all controller id values,
-                                        * therefore +1 here
-                                        */
-                                       controller_id =
-                                               dc->current_state->
-                                               res_ctx.pipe_ctx[i].stream_res.tg->inst +
-                                               1;
-
-                                       /* Disable brightness ramping when the display is blanked
-                                        * as it can hang the DMCU
-                                        */
-                                       if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
-                                               frame_ramp = 0;
-                               }
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+                       if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+                               pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+                               break;
                        }
                }
-               abm->funcs->set_backlight_level_pwm(
-                               abm,
-                               backlight_pwm_u16_16,
-                               frame_ramp,
-                               controller_id,
-                               use_smooth_brightness);
        }
 
-       return true;
+       return pipe_ctx;
 }
 
-bool dc_link_set_abm_disable(const struct dc_link *link)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp)
 {
        struct dc  *dc = link->ctx->dc;
-       struct abm *abm = dc->res_pool->abm;
 
-       if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
-               return false;
+       DC_LOGGER_INIT(link->ctx->logger);
+       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+                       backlight_pwm_u16_16, backlight_pwm_u16_16);
 
-       abm->funcs->set_abm_immediate_disable(abm);
+       if (dc_is_embedded_signal(link->connector_signal)) {
+               struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
 
+               if (pipe_ctx) {
+                       /* Disable brightness ramping when the display is blanked
+                        * as it can hang the DMCU
+                        */
+                       if (pipe_ctx->plane_state == NULL)
+                               frame_ramp = 0;
+               } else {
+                       ASSERT(false);
+                       return false;
+               }
+
+               dc->hwss.set_backlight_level(
+                               pipe_ctx,
+                               backlight_pwm_u16_16,
+                               frame_ramp);
+       }
        return true;
 }
 
@@ -2423,12 +2560,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dmub_psr *psr = dc->res_pool->psr;
 
-       if (psr != NULL && link->psr_feature_enabled)
+       if (psr != NULL && link->psr_settings.psr_feature_enabled)
                psr->funcs->psr_enable(psr, allow_active);
-       else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+       else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
 
-       link->psr_allow_active = allow_active;
+       link->psr_settings.psr_allow_active = allow_active;
 
        return true;
 }
@@ -2439,9 +2576,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dmub_psr *psr = dc->res_pool->psr;
 
-       if (psr != NULL && link->psr_feature_enabled)
+       if (psr != NULL && link->psr_settings.psr_feature_enabled)
                psr->funcs->psr_get_state(psr, psr_state);
-       else if (dmcu != NULL && link->psr_feature_enabled)
+       else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->get_psr_state(dmcu, psr_state);
 
        return true;
@@ -2612,14 +2749,14 @@ bool dc_link_setup_psr(struct dc_link *link,
        psr_context->frame_delay = 0;
 
        if (psr)
-               link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
+               link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
        else
-               link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+               link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
 
        /* psr_enabled == 0 indicates setup_psr did not succeed, but this
         * should not happen since firmware should be running at this point
         */
-       if (link->psr_feature_enabled == 0)
+       if (link->psr_settings.psr_feature_enabled == 0)
                ASSERT(0);
 
        return true;
@@ -2966,7 +3103,7 @@ void core_link_enable_stream(
        enum dc_status status;
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+       if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
@@ -3040,6 +3177,18 @@ void core_link_enable_stream(
                if (pipe_ctx->stream->dpms_off)
                        return;
 
+               /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+                * link training). This is to make sure the bandwidth sent to DIG BE won't be
+                * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+                * will be automatically set at a later time when the video is enabled
+                * (DP_VID_STREAM_EN = 1).
+                */
+               if (pipe_ctx->stream->timing.flags.DSC) {
+                       if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+                                       dc_is_virtual_signal(pipe_ctx->stream->signal))
+                               dp_set_dsc_enable(pipe_ctx, true);
+               }
+
                status = enable_link(state, pipe_ctx);
 
                if (status != DC_OK) {
@@ -3067,11 +3216,6 @@ void core_link_enable_stream(
                                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                        COLOR_DEPTH_UNDEFINED);
 
-               if (pipe_ctx->stream->timing.flags.DSC) {
-                       if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
-                                       dc_is_virtual_signal(pipe_ctx->stream->signal))
-                               dp_set_dsc_enable(pipe_ctx, true);
-               }
                dc->hwss.enable_stream(pipe_ctx);
 
                /* Set DPS PPS SDP (AKA "info frames") */
@@ -3109,7 +3253,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->sink->link;
 
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+       if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
index 256889eed93e37dc0ee49de0b68a0055058a18cd..aefd29a440b52060781753360b97cb2d61711b4c 100644 (file)
@@ -599,7 +599,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
        do {
                struct aux_payload current_payload;
                bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
-                       payload->length ? true : false;
+                       payload->length;
 
                current_payload.address = payload->address;
                current_payload.data = &payload->data[retrieved];
index aa3c45a69b5ec70ef4f021c25a9633612a77c4c0..1db592372435e818d09cd3bb273773858e0cc5da 100644 (file)
@@ -13,7 +13,6 @@
 #include "core_status.h"
 #include "dpcd_defs.h"
 
-#include "resource.h"
 #define DC_LOGGER \
        link->ctx->logger
 
@@ -1710,19 +1709,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
 
 static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 {
-       /* Set Default link settings */
-       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
-                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
-       /* Higher link settings based on feature supported */
-       if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH2;
-
-       if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH3;
+       struct dc_link_settings max_link_cap = {0};
 
-       if (link->link_enc->funcs->get_max_link_cap)
-               link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+       /* get max link encoder capability */
+       link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
 
        /* Lower link settings based on sink's link cap */
        if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2426,7 +2416,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
 {
        union dpcd_psr_configuration psr_configuration;
 
-       if (!link->psr_feature_enabled)
+       if (!link->psr_settings.psr_feature_enabled)
                return false;
 
        dm_helpers_dp_read_dpcd(
@@ -2908,6 +2898,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                                        sizeof(hpd_irq_dpcd_data),
                                        "Status: ");
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.blank_stream(pipe_ctx);
+               }
+
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
@@ -2927,6 +2923,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_link_reallocate_mst_payload(link);
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+               }
+
                status = false;
                if (out_link_loss)
                        *out_link_loss = true;
@@ -4227,6 +4229,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
        const uint32_t post_oui_delay = 30; // 30ms
+       uint8_t dspc = 0;
+       enum dc_status ret;
+
+       ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+                                 sizeof(dspc));
+
+       if (ret != DC_OK) {
+               DC_LOG_ERROR("Error in DP aux read transaction,"
+                            " not writing source specific data\n");
+               return;
+       }
+
+       /* Return if OUI unsupported */
+       if (!(dspc & DP_OUI_SUPPORT))
+               return;
 
        if (!link->dc->vendor_signature.is_valid) {
                struct dpcd_amd_signature amd_signature;
index 51e0ee6e769507f04e7f8e54c734963a0cef6626..6590f51caefabb9d9b4749539638646e2179f8b3 100644 (file)
@@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
        struct dc_stream_state *stream = pipe_ctx->stream;
        bool result = false;
 
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+       if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
                result = true;
        else
                result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
index f4bcc71b2920c396cac6ff55186893c5720cdb69..cb5d11f11cad5130f47d7ee76a2d9f368b1d1b3b 100644 (file)
@@ -692,6 +692,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
        /* Round up, assume original video size always even dimensions */
        data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
        data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+       data->viewport_unadjusted = data->viewport;
+       data->viewport_c_unadjusted = data->viewport_c;
 }
 
 static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -1061,8 +1064,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        calculate_viewport(pipe_ctx);
 
-       if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
-               pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+       if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
+               pipe_ctx->plane_res.scl_data.viewport.width < 12) {
                if (store_h_border_left) {
                        restore_border_left_from_dst(pipe_ctx,
                                store_h_border_left);
@@ -1358,9 +1361,6 @@ bool dc_add_plane_to_context(
        dc_plane_state_retain(plane_state);
 
        while (head_pipe) {
-               tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
-               ASSERT(tail_pipe);
-
                free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
 
        #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1378,6 +1378,8 @@ bool dc_add_plane_to_context(
                free_pipe->plane_state = plane_state;
 
                if (head_pipe != free_pipe) {
+                       tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+                       ASSERT(tail_pipe);
                        free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
                        free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
                        free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1545,35 +1547,6 @@ bool dc_add_all_planes_for_stream(
        return add_all_planes_for_stream(dc, stream, &set, 1, context);
 }
 
-
-static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
-       struct dc_stream_state *new_stream)
-{
-       if (cur_stream == NULL)
-               return true;
-
-       if (memcmp(&cur_stream->hdr_static_metadata,
-                       &new_stream->hdr_static_metadata,
-                       sizeof(struct dc_info_packet)) != 0)
-               return true;
-
-       return false;
-}
-
-static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
-               struct dc_stream_state *new_stream)
-{
-       if (cur_stream == NULL)
-               return true;
-
-       if (memcmp(&cur_stream->vsc_infopacket,
-                       &new_stream->vsc_infopacket,
-                       sizeof(struct dc_info_packet)) != 0)
-               return true;
-
-       return false;
-}
-
 static bool is_timing_changed(struct dc_stream_state *cur_stream,
                struct dc_stream_state *new_stream)
 {
@@ -1608,15 +1581,9 @@ static bool are_stream_backends_same(
        if (is_timing_changed(stream_a, stream_b))
                return false;
 
-       if (is_hdr_static_meta_changed(stream_a, stream_b))
-               return false;
-
        if (stream_a->dpms_off != stream_b->dpms_off)
                return false;
 
-       if (is_vsc_info_packet_changed(stream_a, stream_b))
-               return false;
-
        return true;
 }
 
@@ -1756,21 +1723,6 @@ static struct audio *find_first_free_audio(
        return 0;
 }
 
-bool resource_is_stream_unchanged(
-       struct dc_state *old_context, struct dc_stream_state *stream)
-{
-       int i;
-
-       for (i = 0; i < old_context->stream_count; i++) {
-               struct dc_stream_state *old_stream = old_context->streams[i];
-
-               if (are_stream_backends_same(old_stream, stream))
-                               return true;
-       }
-
-       return false;
-}
-
 /**
  * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
  */
@@ -2025,17 +1977,6 @@ enum dc_status resource_map_pool_resources(
        int pipe_idx = -1;
        struct dc_bios *dcb = dc->ctx->dc_bios;
 
-       /* TODO Check if this is needed */
-       /*if (!resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                               stream->bit_depth_params =
-                                               old_context->streams[i]->bit_depth_params;
-                               stream->clamping = old_context->streams[i]->clamping;
-                               continue;
-                       }
-               }
-       */
-
        calculate_phy_pix_clks(stream);
 
        /* TODO: Check Linux */
@@ -2718,15 +2659,9 @@ bool pipe_need_reprogram(
        if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
                return true;
 
-       if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
-               return true;
-
        if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
                return true;
 
-       if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
-               return true;
-
        if (false == pipe_ctx_old->stream->link->link_state_valid &&
                false == pipe_ctx_old->stream->dpms_off)
                return true;
index a249a0e5edd0ff6b67894af5b557c561ac8f6200..9e16af22e4aafc4867b91967ef330dc840314283 100644 (file)
@@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da
        sink->ctx = link->ctx;
        sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
        sink->converter_disable_audio = init_params->converter_disable_audio;
+       sink->is_mst_legacy = init_params->sink_is_legacy;
        sink->dc_container_id = NULL;
        sink->sink_id = init_params->link->ctx->dc_sink_id_count;
        // increment dc_sink_id_count because we don't want two sinks with same ID
index 6ddbb00ed37a5aa5f0b53a525995bb327bc882b7..4f0e7203dba4f41fd6fa4419b11f99aefc7d97c1 100644 (file)
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
        return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       unsigned int vupdate_line;
-       unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       unsigned int us_per_line;
-
-       if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
-                       ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
-               vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-               if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
-                       return;
-
-               if (vpos >= vupdate_line)
-                       return;
-
-               us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
-               lines_to_vupdate = vupdate_line - vpos;
-               us_to_vupdate = lines_to_vupdate * us_per_line;
-
-               /* 70 us is a conservative estimate of cursor update time*/
-               if (us_to_vupdate < 70)
-                       udelay(us_to_vupdate);
-       }
-#endif
-}
 
 /**
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_position(pipe_ctx);
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
index 1935cf6601ebd03bb6dbd06689343fe82d9efe1a..85908561c7418a4a60ea1a0bfc44d87b5c395f5f 100644 (file)
@@ -29,6 +29,9 @@
 #include "dc_types.h"
 #include "grph_object_defs.h"
 #include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
 #include "gpio_types.h"
 #include "link_service_types.h"
 #include "grph_object_ctrl_defs.h"
@@ -39,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.76"
+#define DC_VER "3.2.84"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -95,6 +98,49 @@ struct dc_plane_cap {
        } max_downscale_factor;
 };
 
+// Color management caps (DPP and MPC)
+struct rom_curve_caps {
+       uint16_t srgb : 1;
+       uint16_t bt2020 : 1;
+       uint16_t gamma2_2 : 1;
+       uint16_t pq : 1;
+       uint16_t hlg : 1;
+};
+
+struct dpp_color_caps {
+       uint16_t dcn_arch : 1; // all DCE generations treated the same
+       // input lut is different than most LUTs, just plain 256-entry lookup
+       uint16_t input_lut_shared : 1; // shared with DGAM
+       uint16_t icsc : 1;
+       uint16_t dgam_ram : 1;
+       uint16_t post_csc : 1; // before gamut remap
+       uint16_t gamma_corr : 1;
+
+       // hdr_mult and gamut remap always available in DPP (in that order)
+       // 3d lut implies shaper LUT,
+       // it may be shared with MPC - check MPC:shared_3d_lut flag
+       uint16_t hw_3d_lut : 1;
+       uint16_t ogam_ram : 1; // blnd gam
+       uint16_t ocsc : 1;
+       struct rom_curve_caps dgam_rom_caps;
+       struct rom_curve_caps ogam_rom_caps;
+};
+
+struct mpc_color_caps {
+       uint16_t gamut_remap : 1;
+       uint16_t ogam_ram : 1;
+       uint16_t ocsc : 1;
+       uint16_t num_3dluts : 3; //3d lut always assumes a preceding shaper LUT
+       uint16_t shared_3d_lut:1; //can be in either DPP or MPC, but single instance
+
+       struct rom_curve_caps ogam_rom_caps;
+};
+
+struct dc_color_caps {
+       struct dpp_color_caps dpp;
+       struct mpc_color_caps mpc;
+};
+
 struct dc_caps {
        uint32_t max_streams;
        uint32_t max_links;
@@ -117,9 +163,9 @@ struct dc_caps {
        bool psp_setup_panel_mode;
        bool extended_aux_timeout_support;
        bool dmcub_support;
-       bool hw_3d_lut;
        enum dp_protocol_version max_dp_protocol_version;
        struct dc_plane_cap planes[MAX_PLANES];
+       struct dc_color_caps color;
 };
 
 struct dc_bug_wa {
@@ -230,7 +276,8 @@ struct dc_config {
        bool forced_clocks;
        bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
        bool multi_mon_pp_mclk_switch;
-       bool psr_on_dmub;
+       bool disable_dmcu;
+       bool enable_4to1MPC;
 };
 
 enum visual_confirm {
@@ -238,6 +285,7 @@ enum visual_confirm {
        VISUAL_CONFIRM_SURFACE = 1,
        VISUAL_CONFIRM_HDR = 2,
        VISUAL_CONFIRM_MPCTREE = 4,
+       VISUAL_CONFIRM_PSR = 5,
 };
 
 enum dcc_option {
@@ -429,6 +477,7 @@ struct dc_debug_options {
        bool enable_dmcub_surface_flip;
        bool usbc_combo_phy_reset_wa;
        bool disable_dsc;
+       bool enable_dram_clock_change_one_display_vactive;
 };
 
 struct dc_debug_data {
@@ -474,6 +523,7 @@ struct dc_bounding_box_overrides {
        int urgent_latency_ns;
        int percent_of_ideal_drambw;
        int dram_clock_change_latency_ns;
+       int dummy_clock_change_latency_ns;
        /* This forces a hard min on the DCFCLK we use
         * for DML.  Unlike the debug option for forcing
         * DCFCLK, this override affects watermark calculations
@@ -987,6 +1037,7 @@ struct dpcd_caps {
        union dpcd_fec_capability fec_cap;
        struct dpcd_dsc_capabilities dsc_caps;
        struct dc_lttpr_caps lttpr_caps;
+       struct psr_caps psr_caps;
 
 };
 
@@ -1004,6 +1055,35 @@ union dpcd_sink_ext_caps {
        uint8_t raw;
 };
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+       struct {
+               uint8_t version;
+               uint8_t reserved;
+               struct {
+                       uint8_t repeater        : 1;
+                       uint8_t hdcp_capable    : 1;
+                       uint8_t reserved        : 6;
+               } byte0;
+       } fields;
+       uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+       struct {
+               uint8_t HDCP_CAPABLE:1;
+               uint8_t REPEATER:1;
+               uint8_t RESERVED:6;
+       } bits;
+       uint8_t raw;
+};
+
+struct hdcp_caps {
+       union hdcp_rx_caps rx_caps;
+       union hdcp_bcaps bcaps;
+};
+#endif
+
 #include "dc_link.h"
 
 /*******************************************************************************
@@ -1046,7 +1126,7 @@ struct dc_sink {
        void *priv;
        struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
        bool converter_disable_audio;
-
+       bool is_mst_legacy;
        struct dc_sink_dsc_caps dsc_caps;
        struct dc_sink_fec_caps fec_caps;
 
@@ -1073,6 +1153,7 @@ struct dc_sink_init_data {
        struct dc_link *link;
        uint32_t dongle_max_pix_clk;
        bool converter_disable_audio;
+       bool sink_is_legacy;
 };
 
 struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
@@ -1104,9 +1185,16 @@ void dc_set_power_state(
                struct dc *dc,
                enum dc_acpi_cm_power_state power_state);
 void dc_resume(struct dc *dc);
-unsigned int dc_get_current_backlight_pwm(struct dc *dc);
-unsigned int dc_get_target_backlight_pwm(struct dc *dc);
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+/*
+ * HDCP Interfaces
+ */
+enum hdcp_message_status dc_process_hdcp_msg(
+               enum signal_type signal,
+               struct dc_link *link,
+               struct hdcp_protection_message *message_info);
+#endif
 bool dc_is_dmcu_initialized(struct dc *dc);
 
 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
index 59c298a6484f396c3872dcb9d6ca32b6baa12a32..eea2429ac67d8a93273e65d1820456795d692ca4 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "../dmub/inc/dmub_srv.h"
+#include "../dmub/dmub_srv.h"
 
 static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
                                  struct dmub_srv *dmub)
@@ -58,7 +58,7 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
 }
 
 void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
-                          struct dmub_cmd_header *cmd)
+                          union dmub_rb_cmd *cmd)
 {
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
index 754b6077539cf8a0b8f6ff3778106cc7d288f6b3..a3a09ccb6d266c20514eb30b8a03834e840737ca 100644 (file)
 #define _DMUB_DC_SRV_H_
 
 #include "os_types.h"
-#include "../dmub/inc/dmub_cmd.h"
+#include "dmub/dmub_srv.h"
 
 struct dmub_srv;
-struct dmub_cmd_header;
 
 struct dc_reg_helper_state {
        bool gather_in_progress;
@@ -49,7 +48,7 @@ struct dc_dmub_srv {
 };
 
 void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
-                          struct dmub_cmd_header *cmd);
+                          union dmub_rb_cmd *cmd);
 
 void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
 
index bb2730e9521ed3320f8de0ee8239cdad97f53445..af177c087d3b489057f6b30a13dd725a1dd21897 100644 (file)
@@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities {
        union dpcd_dsc_ext_capabilities dsc_ext_caps;
 };
 
+/* These parameters are from PSR capabilities reported by Sink DPCD */
+struct psr_caps {
+       unsigned char psr_version;
+       unsigned int psr_rfb_setup_time;
+       bool psr_exit_link_training_required;
+};
 
 #endif /* DC_DP_TYPES_H */
index 737048d8a96c16cf2bf2b89763e243a80d2af8db..85a0170be5449158e03c3b997b4cb788b54320e4 100644 (file)
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
 
@@ -73,7 +73,7 @@ static inline void submit_dmub_burst_write(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
 
@@ -92,7 +92,7 @@ static inline void submit_dmub_reg_wait(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        memset(cmd_buf, 0, sizeof(*cmd_buf));
        offload->reg_seq_count = 0;
index 00ff5e98278c2f731957a08fcf0c4bb32ca5ed2e..f63fc25aa6c5475bad8adf3df8a36f4b356e1bc2 100644 (file)
@@ -66,6 +66,22 @@ struct time_stamp {
 struct link_trace {
        struct time_stamp time_stamp;
 };
+
+/* PSR feature flags */
+struct psr_settings {
+       bool psr_feature_enabled;               // PSR is supported by sink
+       bool psr_allow_active;                  // PSR is currently active
+       enum dc_psr_version psr_version;                // Internal PSR version, determined based on DPCD
+
+       /* These parameters are calculated in Driver,
+        * based on display timing and Sink capabilities.
+        * If VBLANK region is too small and Sink takes a long time
+        * to set up RFB, it may take an extra frame to enter PSR state.
+        */
+       bool psr_frame_capture_indication_req;
+       unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
 /*
  * A link contains one or more sinks and their connected status.
  * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -118,6 +134,7 @@ struct dc_link {
 
        struct dc_context *ctx;
 
+       struct panel_cntl *panel_cntl;
        struct link_encoder *link_enc;
        struct graphics_object_id link_id;
        union ddi_channel_mapping ddi_channel_mapping;
@@ -126,11 +143,14 @@ struct dc_link {
        uint32_t dongle_max_pix_clk;
        unsigned short chip_caps;
        unsigned int dpcd_sink_count;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+       struct hdcp_caps hdcp_caps;
+#endif
        enum edp_revision edp_revision;
-       bool psr_feature_enabled;
-       bool psr_allow_active;
        union dpcd_sink_ext_caps dpcd_sink_ext_caps;
 
+       struct psr_settings psr_settings;
+
        /* MST record stream using this link */
        struct link_flags {
                bool dp_keep_receiver_powered;
@@ -197,7 +217,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
 
-bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
 
 bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
 
@@ -290,6 +310,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
  * DPCD access interfaces
  */
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+bool dc_link_is_hdcp14(struct dc_link *link);
+bool dc_link_is_hdcp22(struct dc_link *link);
+#endif
 void dc_link_set_drive_settings(struct dc *dc,
                                struct link_training_settings *lt_settings,
                                const struct dc_link *link);
index a5c7ef47b8d3c7d680ebe54e70c273323c2c8a6d..49aad691e687e6d91cf85131bc1322b2b3cc18b5 100644 (file)
@@ -167,8 +167,6 @@ struct dc_stream_state {
 
        /* TODO: custom INFO packets */
        /* TODO: ABM info (DMCU) */
-       /* PSR info */
-       unsigned char psr_version;
        /* TODO: CEA VIC */
 
        /* DMCU info */
index 0d210104ba0a10bf66cabd196b0e305dbf4c2e18..f236da1c1859e0c4229cda26bd897ac73116e9c2 100644 (file)
@@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
        uint32_t branch_max_line_width;
 };
 
+enum dc_psr_version {
+       DC_PSR_VERSION_1                        = 0,
+       DC_PSR_VERSION_UNSUPPORTED              = 0xFFFFFFFF,
+};
+
 #endif /* DC_TYPES_H_ */
index fbfcff700971cae06a816eb2463d9b555a018df1..f704a8fd52e81bcbf0875581c33fa2b0db525685 100644 (file)
@@ -29,7 +29,7 @@
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
 dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
 
index b8a3fc505c9b6e1a881209bd9569a8e262b92a19..4e87e70237e3da3cb551a1365d481d8f22564f15 100644 (file)
@@ -55,7 +55,7 @@
 
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
-static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
        uint32_t rampingBoundary = 0xFFFF;
@@ -83,125 +83,12 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
        return true;
 }
 
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
-{
-       uint64_t current_backlight;
-       uint32_t round_result;
-       uint32_t pwm_period_cntl, bl_period, bl_int_count;
-       uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
-       uint32_t bl_period_mask, bl_pwm_mask;
-
-       pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
-       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
-       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
-
-       bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
-       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
-       REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
-
-       if (bl_int_count == 0)
-               bl_int_count = 16;
-
-       bl_period_mask = (1 << bl_int_count) - 1;
-       bl_period &= bl_period_mask;
-
-       bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
-
-       if (fractional_duty_cycle_en == 0)
-               bl_pwm &= bl_pwm_mask;
-       else
-               bl_pwm &= 0xFFFF;
-
-       current_backlight = bl_pwm << (1 + bl_int_count);
-
-       if (bl_period == 0)
-               bl_period = 0xFFFF;
-
-       current_backlight = div_u64(current_backlight, bl_period);
-       current_backlight = (current_backlight + 1) >> 1;
-
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
-       return (uint32_t)(current_backlight);
-}
-
-static void driver_set_backlight_level(struct dce_abm *abm_dce,
-               uint32_t backlight_pwm_u16_16)
-{
-       uint32_t backlight_16bit;
-       uint32_t masked_pwm_period;
-       uint8_t bit_count;
-       uint64_t active_duty_cycle;
-       uint32_t pwm_period_bitcnt;
-
-       /*
-        * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
-        * active duty cycle <= backlight period
-        */
-
-       /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
-        */
-       REG_GET_2(BL_PWM_PERIOD_CNTL,
-                       BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
-                       BL_PWM_PERIOD, &masked_pwm_period);
-
-       if (pwm_period_bitcnt == 0)
-               bit_count = 16;
-       else
-               bit_count = pwm_period_bitcnt;
-
-       /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
-       masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
-
-       /* 1.2 Calculate integer active duty cycle required upper 16 bits
-        * contain integer component, lower 16 bits contain fractional component
-        * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
-        */
-       active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
-
-       /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
-        * components shift by bitCount then mask 16 bits and add rounding bit
-        * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
-        */
-       backlight_16bit = active_duty_cycle >> bit_count;
-       backlight_16bit &= 0xFFFF;
-       backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
-
-       /*
-        * 2. Program register with updated value
-        */
-
-       /* 2.1 Lock group 2 backlight registers */
-
-       REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
-                       BL_PWM_GRP1_REG_LOCK, 1);
-
-       // 2.2 Write new active duty cycle
-       REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
-
-       /* 2.3 Unlock group 2 backlight registers */
-       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_LOCK, 0);
-
-       /* 3 Wait for pending bit to be cleared */
-       REG_WAIT(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
-                       1, 10000);
-}
-
 static void dmcu_set_backlight_level(
        struct dce_abm *abm_dce,
        uint32_t backlight_pwm_u16_16,
        uint32_t frame_ramp,
-       uint32_t controller_id)
+       uint32_t controller_id,
+       uint32_t panel_id)
 {
        unsigned int backlight_8_bit = 0;
        uint32_t s2;
@@ -213,7 +100,7 @@ static void dmcu_set_backlight_level(
                // Take MSB of fractional part since backlight is not max
                backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
 
-       dce_abm_set_pipe(&abm_dce->base, controller_id);
+       dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id);
 
        /* waitDMCUReadyForCmd */
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -248,10 +135,9 @@ static void dmcu_set_backlight_level(
                        0, 1, 80000);
 }
 
-static void dce_abm_init(struct abm *abm)
+static void dce_abm_init(struct abm *abm, uint32_t backlight)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-       unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
 
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -331,86 +217,12 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
        return true;
 }
 
-static bool dce_abm_immediate_disable(struct abm *abm)
+static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
 {
-       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-
        if (abm->dmcu_is_running == false)
                return true;
 
-       dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
-
-       abm->stored_backlight_registers.BL_PWM_CNTL =
-               REG_READ(BL_PWM_CNTL);
-       abm->stored_backlight_registers.BL_PWM_CNTL2 =
-               REG_READ(BL_PWM_CNTL2);
-       abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
-               REG_READ(BL_PWM_PERIOD_CNTL);
-
-       REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
-               &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-       return true;
-}
-
-static bool dce_abm_init_backlight(struct abm *abm)
-{
-       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-       uint32_t value;
-
-       /* It must not be 0, so we have to restore them
-        * Bios bug w/a - period resets to zero,
-        * restoring to cache values which is always correct
-        */
-       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
-       if (value == 0 || value == 1) {
-               if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
-                       REG_WRITE(BL_PWM_CNTL,
-                               abm->stored_backlight_registers.BL_PWM_CNTL);
-                       REG_WRITE(BL_PWM_CNTL2,
-                               abm->stored_backlight_registers.BL_PWM_CNTL2);
-                       REG_WRITE(BL_PWM_PERIOD_CNTL,
-                               abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
-                       REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
-                               BL_PWM_REF_DIV,
-                               abm->stored_backlight_registers.
-                               LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-               } else {
-                       /* TODO: Note: This should not really happen since VBIOS
-                        * should have initialized PWM registers on boot.
-                        */
-                       REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
-                       REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
-               }
-       } else {
-               abm->stored_backlight_registers.BL_PWM_CNTL =
-                               REG_READ(BL_PWM_CNTL);
-               abm->stored_backlight_registers.BL_PWM_CNTL2 =
-                               REG_READ(BL_PWM_CNTL2);
-               abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
-                               REG_READ(BL_PWM_PERIOD_CNTL);
-
-               REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
-                               &abm->stored_backlight_registers.
-                               LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-       }
-
-       /* Have driver take backlight control
-        * TakeBacklightControl(true)
-        */
-       value = REG_READ(BIOS_SCRATCH_2);
-       value |= ATOM_S2_VRI_BRIGHT_ENABLE;
-       REG_WRITE(BIOS_SCRATCH_2, value);
-
-       /* Enable the backlight output */
-       REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
-
-       /* Disable fractional pwm if configured */
-       REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
-                  abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
-
-       /* Unlock group 2 backlight registers */
-       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_LOCK, 0);
+       dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
 
        return true;
 }
@@ -420,21 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
                unsigned int backlight_pwm_u16_16,
                unsigned int frame_ramp,
                unsigned int controller_id,
-               bool use_smooth_brightness)
+               unsigned int panel_inst)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
        DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
                        backlight_pwm_u16_16, backlight_pwm_u16_16);
 
-       /* If DMCU is in reset state, DMCU is uninitialized */
-       if (use_smooth_brightness)
-               dmcu_set_backlight_level(abm_dce,
-                               backlight_pwm_u16_16,
-                               frame_ramp,
-                               controller_id);
-       else
-               driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
+       dmcu_set_backlight_level(abm_dce,
+                       backlight_pwm_u16_16,
+                       frame_ramp,
+                       controller_id,
+                       panel_inst);
 
        return true;
 }
@@ -442,12 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
 static const struct abm_funcs dce_funcs = {
        .abm_init = dce_abm_init,
        .set_abm_level = dce_abm_set_level,
-       .init_backlight = dce_abm_init_backlight,
        .set_pipe = dce_abm_set_pipe,
        .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
        .get_current_backlight = dce_abm_get_current_backlight,
        .get_target_backlight = dce_abm_get_target_backlight,
-       .set_abm_immediate_disable = dce_abm_immediate_disable
+       .init_abm_config = NULL,
+       .set_abm_immediate_disable = dce_abm_immediate_disable,
 };
 
 static void dce_abm_construct(
@@ -461,10 +270,6 @@ static void dce_abm_construct(
 
        base->ctx = ctx;
        base->funcs = &dce_funcs;
-       base->stored_backlight_registers.BL_PWM_CNTL = 0;
-       base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
-       base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
-       base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
        base->dmcu_is_running = false;
 
        abm_dce->regs = regs;
index ba0caaffa24bf37add2e3b654388dfc5df6d40c7..9718a4823372c12c47042498d577bf51b3bfbeba 100644 (file)
 #include "abm.h"
 
 #define ABM_COMMON_REG_LIST_DCE_BASE() \
-       SR(BL_PWM_PERIOD_CNTL), \
-       SR(BL_PWM_CNTL), \
-       SR(BL_PWM_CNTL2), \
-       SR(BL_PWM_GRP1_REG_LOCK), \
-       SR(LVTMA_PWRSEQ_REF_DIV), \
        SR(MASTER_COMM_CNTL_REG), \
        SR(MASTER_COMM_CMD_REG), \
        SR(MASTER_COMM_DATA_REG1)
        .field_name = reg_name ## __ ## field_name ## post_fix
 
 #define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
-       ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
-       ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
-       ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
        ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
        ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
        ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
        type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
        type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
        type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
-       type BL_PWM_PERIOD; \
-       type BL_PWM_PERIOD_BITCNT; \
-       type BL_ACTIVE_INT_FRAC_CNT; \
-       type BL_PWM_FRACTIONAL_EN; \
        type MASTER_COMM_INTERRUPT; \
        type MASTER_COMM_CMD_REG_BYTE0; \
        type MASTER_COMM_CMD_REG_BYTE1; \
-       type MASTER_COMM_CMD_REG_BYTE2; \
-       type BL_PWM_REF_DIV; \
-       type BL_PWM_EN; \
-       type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
-       type BL_PWM_GRP1_REG_LOCK; \
-       type BL_PWM_GRP1_REG_UPDATE_PENDING
+       type MASTER_COMM_CMD_REG_BYTE2
 
 struct dce_abm_shift {
        ABM_REG_FIELD_LIST(uint8_t);
@@ -201,10 +178,6 @@ struct dce_abm_mask {
 };
 
 struct dce_abm_registers {
-       uint32_t BL_PWM_PERIOD_CNTL;
-       uint32_t BL_PWM_CNTL;
-       uint32_t BL_PWM_CNTL2;
-       uint32_t LVTMA_PWRSEQ_REF_DIV;
        uint32_t DC_ABM1_HG_SAMPLE_RATE;
        uint32_t DC_ABM1_LS_SAMPLE_RATE;
        uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@@ -219,7 +192,6 @@ struct dce_abm_registers {
        uint32_t MASTER_COMM_CMD_REG;
        uint32_t MASTER_COMM_DATA_REG1;
        uint32_t BIOS_SCRATCH_2;
-       uint32_t BL_PWM_GRP1_REG_LOCK;
 };
 
 struct dce_abm {
index 2e992fbc0d71844f0d0008cf5afc76cc19aef242..d2ad0504b0de10370768615fabec9318b1490ccb 100644 (file)
@@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry {
        unsigned short div_factor;
 };
 
-static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
-       // /1.001 rates
-       {25170, 25180, 25200, 1000, 1001},      //25.2MHz   ->   25.17
-       {59340, 59350, 59400, 1000, 1001},      //59.4Mhz   ->   59.340
-       {74170, 74180, 74250, 1000, 1001},      //74.25Mhz  ->   74.1758
-       {125870, 125880, 126000, 1000, 1001},   //126Mhz    ->  125.87
-       {148350, 148360, 148500, 1000, 1001},   //148.5Mhz  ->  148.3516
-       {167830, 167840, 168000, 1000, 1001},   //168Mhz    ->  167.83
-       {222520, 222530, 222750, 1000, 1001},   //222.75Mhz ->  222.527
-       {257140, 257150, 257400, 1000, 1001},   //257.4Mhz  ->  257.1429
-       {296700, 296710, 297000, 1000, 1001},   //297Mhz    ->  296.7033
-       {342850, 342860, 343200, 1000, 1001},   //343.2Mhz  ->  342.857
-       {395600, 395610, 396000, 1000, 1001},   //396Mhz    ->  395.6
-       {409090, 409100, 409500, 1000, 1001},   //409.5Mhz  ->  409.091
-       {445050, 445060, 445500, 1000, 1001},   //445.5Mhz  ->  445.055
-       {467530, 467540, 468000, 1000, 1001},   //468Mhz    ->  467.5325
-       {519230, 519240, 519750, 1000, 1001},   //519.75Mhz ->  519.231
-       {525970, 525980, 526500, 1000, 1001},   //526.5Mhz  ->  525.974
-       {545450, 545460, 546000, 1000, 1001},   //546Mhz    ->  545.455
-       {593400, 593410, 594000, 1000, 1001},   //594Mhz    ->  593.4066
-       {623370, 623380, 624000, 1000, 1001},   //624Mhz    ->  623.377
-       {692300, 692310, 693000, 1000, 1001},   //693Mhz    ->  692.308
-       {701290, 701300, 702000, 1000, 1001},   //702Mhz    ->  701.2987
-       {791200, 791210, 792000, 1000, 1001},   //792Mhz    ->  791.209
-       {890100, 890110, 891000, 1000, 1001},   //891Mhz    ->  890.1099
-       {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz   -> 1186.8131
-
-       // *1.001 rates
-       {27020, 27030, 27000, 1001, 1000}, //27Mhz
-       {54050, 54060, 54000, 1001, 1000}, //54Mhz
-       {108100, 108110, 108000, 1001, 1000},//108Mhz
-};
-
 static bool dcn20_program_pix_clk(
                struct clock_source *clock_source,
                struct pixel_clk_params *pix_clk_params,
index c5aa1f48593a6eba67b2019b31be21253492bc70..5479d959ec6269805c000476e3899f737945340b 100644 (file)
 
 #include "dc_types.h"
 
-#define BL_REG_LIST()\
-       SR(LVTMA_PWRSEQ_CNTL), \
-       SR(LVTMA_PWRSEQ_STATE)
-
 #define HWSEQ_DCEF_REG_LIST_DCE8() \
        .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
        .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
        SRII(BLND_CONTROL, BLND, 0),\
        SRII(BLND_CONTROL, BLND, 1),\
        SR(BLNDV_CONTROL),\
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_DCE8_REG_LIST() \
        HWSEQ_DCEF_REG_LIST_DCE8(), \
        HWSEQ_BLND_REG_LIST(), \
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_DCE10_REG_LIST() \
        HWSEQ_DCEF_REG_LIST(), \
        HWSEQ_BLND_REG_LIST(), \
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_ST_REG_LIST() \
        HWSEQ_DCE11_REG_LIST_BASE(), \
        SR(DCHUB_FB_LOCATION),\
        SR(DCHUB_AGP_BASE),\
        SR(DCHUB_AGP_BOT),\
-       SR(DCHUB_AGP_TOP), \
-       BL_REG_LIST()
+       SR(DCHUB_AGP_TOP)
 
 #define HWSEQ_VG20_REG_LIST() \
        HWSEQ_DCE120_REG_LIST(),\
 #define HWSEQ_DCE112_REG_LIST() \
        HWSEQ_DCE10_REG_LIST(), \
        HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
-       HWSEQ_PHYPLL_REG_LIST(CRTC), \
-       BL_REG_LIST()
+       HWSEQ_PHYPLL_REG_LIST(CRTC)
 
 #define HWSEQ_DCN_REG_LIST()\
        SR(REFCLK_CNTL), \
        SR(D3VGA_CONTROL), \
        SR(D4VGA_CONTROL), \
        SR(VGA_TEST_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 #define HWSEQ_DCN2_REG_LIST()\
        HWSEQ_DCN_REG_LIST(), \
        SR(D4VGA_CONTROL), \
        SR(D5VGA_CONTROL), \
        SR(D6VGA_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 #define HWSEQ_DCN21_REG_LIST()\
        HWSEQ_DCN_REG_LIST(), \
        SR(D4VGA_CONTROL), \
        SR(D5VGA_CONTROL), \
        SR(D6VGA_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 struct dce_hwseq_registers {
-
-               /* Backlight registers */
-       uint32_t LVTMA_PWRSEQ_CNTL;
-       uint32_t LVTMA_PWRSEQ_STATE;
-
        uint32_t DCFE_CLOCK_CONTROL[6];
        uint32_t DCFEV_CLOCK_CONTROL;
        uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
@@ -465,26 +448,18 @@ struct dce_hwseq_registers {
        HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
        HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
 
-#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-
 #define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
        .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
-       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
 
 #define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
        HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
-       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
 
 #define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
@@ -507,8 +482,7 @@ struct dce_hwseq_registers {
        HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
        HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
        HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
 
 #define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -570,8 +544,7 @@ struct dce_hwseq_registers {
        HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
        HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
        HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
-       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
 
 #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -630,8 +603,7 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
-       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
 
 #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -671,10 +643,7 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
-       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
-       HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
 
 #define HWSEQ_REG_FIELD_LIST(type) \
        type DCFE_CLOCK_ENABLE; \
@@ -706,11 +675,7 @@ struct dce_hwseq_registers {
        type PF_LFB_REGION;\
        type PF_MAX_REGION;\
        type ENABLE_L1_TLB;\
-       type SYSTEM_ACCESS_MODE;\
-       type LVTMA_BLON;\
-       type LVTMA_DIGON;\
-       type LVTMA_DIGON_OVRD;\
-       type LVTMA_PWRSEQ_TARGET_STATE_R;
+       type SYSTEM_ACCESS_MODE;
 
 #define HWSEQ_DCN_REG_FIELD_LIST(type) \
        type HUBP_VTG_SEL; \
index 8527cce81c6fb8f315d7d88f7cfa2ea0fa43bc5a..8d8c84c81b34e2105c0a320d9122ed5094beddc5 100644 (file)
@@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
        .enable_hpd = dce110_link_encoder_enable_hpd,
        .disable_hpd = dce110_link_encoder_disable_hpd,
        .is_dig_enabled = dce110_is_dig_enabled,
-       .destroy = dce110_link_encoder_destroy
+       .destroy = dce110_link_encoder_destroy,
+       .get_max_link_cap = dce110_link_encoder_get_max_link_cap
 };
 
 static enum bp_result link_transmitter_control(
@@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
 
        set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
 }
+
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+       /* Higher link settings based on feature supported */
+       if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+       if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+       *link_settings = max_link_cap;
+}
index 3c9368df4093ac0f52177fb8a733f244c1f804d6..384389f0e2c313c1feb9e4e65176989d89266396 100644 (file)
@@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
 
 bool dce110_is_dig_enabled(struct link_encoder *enc);
 
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 #endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
new file mode 100644 (file)
index 0000000..ebff9b1
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "panel_cntl.h"
+#include "dce_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCE_PANEL_CNTL(panel_cntl)\
+       container_of(panel_cntl, struct dce_panel_cntl, base)
+
+#define CTX \
+       dce_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+       dce_panel_cntl->base.ctx->logger
+
+#define REG(reg)\
+       dce_panel_cntl->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+       dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
+
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+{
+       uint64_t current_backlight;
+       uint32_t round_result;
+       uint32_t pwm_period_cntl, bl_period, bl_int_count;
+       uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+       uint32_t bl_period_mask, bl_pwm_mask;
+
+       pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+       bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+       REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+       if (bl_int_count == 0)
+               bl_int_count = 16;
+
+       bl_period_mask = (1 << bl_int_count) - 1;
+       bl_period &= bl_period_mask;
+
+       bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+       if (fractional_duty_cycle_en == 0)
+               bl_pwm &= bl_pwm_mask;
+       else
+               bl_pwm &= 0xFFFF;
+
+       current_backlight = bl_pwm << (1 + bl_int_count);
+
+       if (bl_period == 0)
+               bl_period = 0xFFFF;
+
+       current_backlight = div_u64(current_backlight, bl_period);
+       current_backlight = (current_backlight + 1) >> 1;
+
+       current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+       round_result = (round_result >> (bl_int_count-1)) & 1;
+
+       current_backlight >>= bl_int_count;
+       current_backlight += round_result;
+
+       return (uint32_t)(current_backlight);
+}
+
+uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t value;
+       uint32_t current_backlight;
+
+       /* It must not be 0, so we have to restore them
+        * Bios bug w/a - period resets to zero,
+        * restoring to cache values which is always correct
+        */
+       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+
+       if (value == 0 || value == 1) {
+               if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
+                       REG_WRITE(BL_PWM_CNTL,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
+                       REG_WRITE(BL_PWM_CNTL2,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
+                       REG_WRITE(BL_PWM_PERIOD_CNTL,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+                       REG_UPDATE(PWRSEQ_REF_DIV,
+                               BL_PWM_REF_DIV,
+                               panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+               } else {
+                       /* TODO: Note: This should not really happen since VBIOS
+                        * should have initialized PWM registers on boot.
+                        */
+                       REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+                       REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+               }
+       } else {
+               panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+                               REG_READ(BL_PWM_CNTL);
+               panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+                               REG_READ(BL_PWM_CNTL2);
+               panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+                               REG_READ(BL_PWM_PERIOD_CNTL);
+
+               REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+                               &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+       }
+
+       // Have driver take backlight control
+       // TakeBacklightControl(true)
+       value = REG_READ(BIOS_SCRATCH_2);
+       value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+       REG_WRITE(BIOS_SCRATCH_2, value);
+
+       // Enable the backlight output
+       REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+       // Unlock group 2 backlight registers
+       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_LOCK, 0);
+
+       current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+
+       return current_backlight;
+}
+
+bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t value;
+
+       REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+       return value;
+}
+
+bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+       REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+       REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+       return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
+}
+
+void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+       panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+               REG_READ(BL_PWM_CNTL);
+       panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+               REG_READ(BL_PWM_CNTL2);
+       panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+               REG_READ(BL_PWM_PERIOD_CNTL);
+
+       REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+               &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+}
+
+void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+               uint32_t backlight_pwm_u16_16)
+{
+       uint32_t backlight_16bit;
+       uint32_t masked_pwm_period;
+       uint8_t bit_count;
+       uint64_t active_duty_cycle;
+       uint32_t pwm_period_bitcnt;
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+       /*
+        * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
+        * active duty cycle <= backlight period
+        */
+
+       /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
+        */
+       REG_GET_2(BL_PWM_PERIOD_CNTL,
+                       BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+                       BL_PWM_PERIOD, &masked_pwm_period);
+
+       if (pwm_period_bitcnt == 0)
+               bit_count = 16;
+       else
+               bit_count = pwm_period_bitcnt;
+
+       /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+       masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+       /* 1.2 Calculate integer active duty cycle required upper 16 bits
+        * contain integer component, lower 16 bits contain fractional component
+        * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+        */
+       active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
+
+       /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
+        * components shift by bitCount then mask 16 bits and add rounding bit
+        * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+        */
+       backlight_16bit = active_duty_cycle >> bit_count;
+       backlight_16bit &= 0xFFFF;
+       backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+       /*
+        * 2. Program register with updated value
+        */
+
+       /* 2.1 Lock group 2 backlight registers */
+
+       REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+                       BL_PWM_GRP1_REG_LOCK, 1);
+
+       // 2.2 Write new active duty cycle
+       REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+       /* 2.3 Unlock group 2 backlight registers */
+       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_LOCK, 0);
+
+       /* 3 Wait for pending bit to be cleared */
+       REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+                       1, 10000);
+}
+
+static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
+
+       kfree(dce_panel_cntl);
+       *panel_cntl = NULL;
+}
+
+static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
+       .destroy = dce_panel_cntl_destroy,
+       .hw_init = dce_panel_cntl_hw_init,
+       .is_panel_backlight_on = dce_is_panel_backlight_on,
+       .is_panel_powered_on = dce_is_panel_powered_on,
+       .store_backlight_level = dce_store_backlight_level,
+       .driver_set_backlight = dce_driver_set_backlight,
+};
+
+void dce_panel_cntl_construct(
+       struct dce_panel_cntl *dce_panel_cntl,
+       const struct panel_cntl_init_data *init_data,
+       const struct dce_panel_cntl_registers *regs,
+       const struct dce_panel_cntl_shift *shift,
+       const struct dce_panel_cntl_mask *mask)
+{
+       struct panel_cntl *base = &dce_panel_cntl->base;
+
+       base->stored_backlight_registers.BL_PWM_CNTL = 0;
+       base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+       base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+       base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+
+       dce_panel_cntl->regs = regs;
+       dce_panel_cntl->shift = shift;
+       dce_panel_cntl->mask = mask;
+
+       dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
+       dce_panel_cntl->base.ctx = init_data->ctx;
+       dce_panel_cntl->base.inst = init_data->inst;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
new file mode 100644 (file)
index 0000000..70ec691
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCE_H__
+#define __DC_PANEL_CNTL__DCE_H__
+
+#include "panel_cntl.h"
+
+/* set register offset with instance */
+#define DCE_PANEL_CNTL_SR(reg_name, block)\
+       .reg_name = mm ## block ## _ ## reg_name
+
+#define DCE_PANEL_CNTL_REG_LIST()\
+       DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+       SR(BL_PWM_CNTL), \
+       SR(BL_PWM_CNTL2), \
+       SR(BL_PWM_PERIOD_CNTL), \
+       SR(BL_PWM_GRP1_REG_LOCK), \
+       SR(BIOS_SCRATCH_2)
+
+#define DCN_PANEL_CNTL_SR(reg_name, block)\
+       .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
+                                       mm ## block ## _ ## reg_name
+
+#define DCN_PANEL_CNTL_REG_LIST()\
+       DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+       DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+       SR(BL_PWM_CNTL), \
+       SR(BL_PWM_CNTL2), \
+       SR(BL_PWM_PERIOD_CNTL), \
+       SR(BL_PWM_GRP1_REG_LOCK), \
+       SR(BIOS_SCRATCH_2)
+
+#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+       .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
+
+#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
+       type LVTMA_BLON;\
+       type LVTMA_DIGON;\
+       type LVTMA_DIGON_OVRD;\
+       type LVTMA_PWRSEQ_TARGET_STATE_R; \
+       type BL_PWM_REF_DIV; \
+       type BL_PWM_EN; \
+       type BL_ACTIVE_INT_FRAC_CNT; \
+       type BL_PWM_FRACTIONAL_EN; \
+       type BL_PWM_PERIOD; \
+       type BL_PWM_PERIOD_BITCNT; \
+       type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+       type BL_PWM_GRP1_REG_LOCK; \
+       type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_panel_cntl_shift {
+       DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_panel_cntl_mask {
+       DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_panel_cntl_registers {
+       uint32_t PWRSEQ_CNTL;
+       uint32_t PWRSEQ_STATE;
+       uint32_t BL_PWM_CNTL;
+       uint32_t BL_PWM_CNTL2;
+       uint32_t BL_PWM_PERIOD_CNTL;
+       uint32_t BL_PWM_GRP1_REG_LOCK;
+       uint32_t PWRSEQ_REF_DIV;
+       uint32_t BIOS_SCRATCH_2;
+};
+
+struct dce_panel_cntl {
+       struct panel_cntl base;
+       const struct dce_panel_cntl_registers *regs;
+       const struct dce_panel_cntl_shift *shift;
+       const struct dce_panel_cntl_mask *mask;
+};
+
+void dce_panel_cntl_construct(
+       struct dce_panel_cntl *panel_cntl,
+       const struct panel_cntl_init_data *init_data,
+       const struct dce_panel_cntl_registers *regs,
+       const struct dce_panel_cntl_shift *shift,
+       const struct dce_panel_cntl_mask *mask);
+
+#endif /* __DC_PANEL_CNTL__DCE_H__ */
index 451574971b9641327742611311ba4ecd9d991ce6..4cdaaf4d881cc42122d597ca71e3fbcb5068b8ac 100644 (file)
@@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
 {
        struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
 
-       uint32_t speakers = 0;
        uint32_t channels = 0;
 
        ASSERT(audio_info);
@@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
                /* This should not happen.it does so we don't get BSOD*/
                return;
 
-       speakers = audio_info->flags.info.ALLSPEAKERS;
        channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
 
        /* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
new file mode 100644 (file)
index 0000000..da0b29a
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#include "atom.h"
+
+#define TO_DMUB_ABM(abm)\
+       container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+       (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+       dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+       dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+       uint32_t ramping_boundary = 0xFFFF;
+
+       cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+       cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+       cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+       cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+       cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static void dmcub_set_backlight_level(
+       struct dce_abm *dce_abm,
+       uint32_t backlight_pwm_u16_16,
+       uint32_t frame_ramp,
+       uint32_t otg_inst,
+       uint32_t panel_inst)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = dce_abm->base.ctx;
+       unsigned int backlight_8_bit = 0;
+       uint32_t s2;
+
+       if (backlight_pwm_u16_16 & 0x10000)
+               // Check for max backlight condition
+               backlight_8_bit = 0xFF;
+       else
+               // Take MSB of fractional part since backlight is not max
+               backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
+       dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
+
+       REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
+
+       if (otg_inst == 0)
+               frame_ramp = 0;
+
+       cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+       cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+       cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       // Update requested backlight level
+       s2 = REG_READ(BIOS_SCRATCH_2);
+
+       s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+       backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+                               ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+       s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+       REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+       union dmub_rb_cmd cmd;
+       uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+
+       cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+       cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+       cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+       REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+       REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+       REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+       REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+       REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+       REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+                       ABM1_HG_NUM_OF_BINS_SEL, 0,
+                       ABM1_HG_VMAX_SEL, 1,
+                       ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+       REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+                       ABM1_IPCSC_COEFF_SEL_R, 2,
+                       ABM1_IPCSC_COEFF_SEL_G, 4,
+                       ABM1_IPCSC_COEFF_SEL_B, 2);
+
+       REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+                       BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+       REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+                       BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+       REG_UPDATE(BL1_PWM_USER_LEVEL,
+                       BL1_PWM_USER_LEVEL, backlight);
+
+       REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+                       ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+                       ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+       REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+                       ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+                       ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+                       ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+       dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+       unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+       /* return backlight in hardware format which is unsigned 17 bits, with
+        * 1 bit integer and 16 bit fractional
+        */
+       return backlight;
+}
+
+static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+       unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+       /* return backlight in hardware format which is unsigned 17 bits, with
+        * 1 bit integer and 16 bit fractional
+        */
+       return backlight;
+}
+
+static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+
+       cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+       cmd.abm_set_level.abm_set_level_data.level = level;
+       cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
+{
+       dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
+
+       return true;
+}
+
+static bool dmub_abm_set_backlight_level_pwm(
+               struct abm *abm,
+               unsigned int backlight_pwm_u16_16,
+               unsigned int frame_ramp,
+               unsigned int otg_inst,
+               uint32_t panel_inst)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+       dmcub_set_backlight_level(dce_abm,
+                       backlight_pwm_u16_16,
+                       frame_ramp,
+                       otg_inst,
+                       panel_inst);
+
+       return true;
+}
+
+static bool dmub_abm_init_config(struct abm *abm,
+       const char *src,
+       unsigned int bytes)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+
+       // TODO: Optimize by only reading back final 4 bytes
+       dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+       // Copy iramtable into cw7
+       memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+       // Fw will copy from cw7 to fw_state
+       cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+       cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+       cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+       cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+       cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static const struct abm_funcs abm_funcs = {
+       .abm_init = dmub_abm_init,
+       .set_abm_level = dmub_abm_set_level,
+       .set_pipe = dmub_abm_set_pipe,
+       .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
+       .get_current_backlight = dmub_abm_get_current_backlight,
+       .get_target_backlight = dmub_abm_get_target_backlight,
+       .set_abm_immediate_disable = dmub_abm_immediate_disable,
+       .init_abm_config = dmub_abm_init_config,
+};
+
+static void dmub_abm_construct(
+       struct dce_abm *abm_dce,
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask)
+{
+       struct abm *base = &abm_dce->base;
+
+       base->ctx = ctx;
+       base->funcs = &abm_funcs;
+       base->dmcu_is_running = false;
+
+       abm_dce->regs = regs;
+       abm_dce->abm_shift = abm_shift;
+       abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dmub_abm_create(
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask)
+{
+       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+       if (abm_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+       return &abm_dce->base;
+}
+
+void dmub_abm_destroy(struct abm **abm)
+{
+       struct dce_abm *abm_dce = TO_DMUB_ABM(*abm);
+
+       kfree(abm_dce);
+       *abm = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
new file mode 100644 (file)
index 0000000..3a5d5ac
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DMUB_ABM_H__
+#define __DMUB_ABM_H__
+
+#include "abm.h"
+#include "dce_abm.h"
+
+struct abm *dmub_abm_create(
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask);
+
+void dmub_abm_destroy(struct abm **abm);
+
+#endif
index bc109d4fc6e6bfcbed02821c0d2b93b4e5aaee77..044a0133ebb13b1d82067e7013889c6c86c0db98 100644 (file)
 #include "dmub_psr.h"
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "../../dmub/inc/dmub_srv.h"
-#include "../../dmub/inc/dmub_gpint_cmd.h"
+#include "dmub/dmub_srv.h"
 #include "core_types.h"
 
 #define MAX_PIPES 6
 
+/**
+ * Convert dmcub psr state to dmcu psr state.
+ */
+static void convert_psr_state(uint32_t *psr_state)
+{
+       if (*psr_state == 0)
+               *psr_state = 0;
+       else if (*psr_state == 0x10)
+               *psr_state = 1;
+       else if (*psr_state == 0x11)
+               *psr_state = 2;
+       else if (*psr_state == 0x20)
+               *psr_state = 3;
+       else if (*psr_state == 0x21)
+               *psr_state = 4;
+       else if (*psr_state == 0x30)
+               *psr_state = 5;
+       else if (*psr_state == 0x31)
+               *psr_state = 6;
+       else if (*psr_state == 0x40)
+               *psr_state = 7;
+       else if (*psr_state == 0x41)
+               *psr_state = 8;
+       else if (*psr_state == 0x42)
+               *psr_state = 9;
+       else if (*psr_state == 0x43)
+               *psr_state = 10;
+       else if (*psr_state == 0x44)
+               *psr_state = 11;
+       else if (*psr_state == 0x50)
+               *psr_state = 12;
+       else if (*psr_state == 0x51)
+               *psr_state = 13;
+       else if (*psr_state == 0x52)
+               *psr_state = 14;
+       else if (*psr_state == 0x53)
+               *psr_state = 15;
+}
+
 /**
  * Get PSR state from firmware.
  */
@@ -43,6 +81,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
        dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
 
        dmub_srv_get_gpint_response(srv, psr_state);
+
+       convert_psr_state(psr_state);
 }
 
 /**
@@ -53,19 +93,23 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
        union dmub_rb_cmd cmd;
        struct dc_context *dc = dmub->ctx;
 
+       if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+               return false;
+
        cmd.psr_set_version.header.type = DMUB_CMD__PSR;
        cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
-
-       if (stream->psr_version == 0x0) // Unsupported
-               return false;
-       else if (stream->psr_version == 0x1)
+       switch (stream->link->psr_settings.psr_version) {
+       case DC_PSR_VERSION_1:
                cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
-       else if (stream->psr_version == 0x2)
-               cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
-
-       cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+               break;
+       case DC_PSR_VERSION_UNSUPPORTED:
+       default:
+               cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+               break;
+       }
+       cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 
@@ -89,7 +133,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
 
        cmd.psr_enable.header.payload_bytes = 0; // Send header only
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
@@ -113,7 +157,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
        cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
        cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
@@ -162,7 +206,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
 
        // Hw insts
-       copy_settings_data->dpphy_inst                          = psr_context->phyType;
+       copy_settings_data->dpphy_inst                          = psr_context->transmitterId;
        copy_settings_data->aux_inst                            = psr_context->channel;
        copy_settings_data->digfe_inst                          = psr_context->engineId;
        copy_settings_data->digbe_inst                          = psr_context->transmitterId;
@@ -187,8 +231,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        copy_settings_data->smu_optimizations_en                = psr_context->allow_smu_optimizations;
        copy_settings_data->frame_delay                         = psr_context->frame_delay;
        copy_settings_data->frame_cap_ind                       = psr_context->psrFrameCaptureIndicationReq;
+       copy_settings_data->debug.visual_confirm                = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
+                                                                       true : false;
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 
index 8f78bf9abbca1b070b8af6c217bc6967d1886cd9..a28c4ae0f2599062db4c7e74d7245e31ec79ca0d 100644 (file)
@@ -46,6 +46,7 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
 #include "dce100/dce100_hw_sequencer.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define opp_regs(id)\
 [id] = {\
        OPP_DCE_100_REG_LIST(id),\
@@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct output_pixel_processor *dce100_opp_create(
        struct dc_context *ctx,
        uint32_t inst)
@@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
 static const struct resource_funcs dce100_res_pool_funcs = {
        .destroy = dce100_destroy_resource_pool,
        .link_enc_create = dce100_link_encoder_create,
+       .panel_cntl_create = dce100_panel_cntl_create,
        .validate_bandwidth = dce100_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
index c279982947e10ae7cb020485ae850f531a81a1dd..b77e9dc1608634ca9ad7329e5ecd5081234b990e 100644 (file)
@@ -53,6 +53,7 @@
 #include "abm.h"
 #include "audio.h"
 #include "reg_helper.h"
+#include "panel_cntl.h"
 
 /* include DCE11 register header files */
 #include "dce/dce_11_0_d.h"
@@ -695,31 +696,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 
 
 
-}
-
-/*todo: cloned in stream enc, fix*/
-bool dce110_is_panel_backlight_on(struct dc_link *link)
-{
-       struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
-       uint32_t value;
-
-       REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
-
-       return value;
-}
-
-bool dce110_is_panel_powered_on(struct dc_link *link)
-{
-       struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
-       uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
-       REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
-
-       REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
-
-       return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
 }
 
 static enum bp_result link_transmitter_control(
@@ -810,7 +786,6 @@ void dce110_edp_power_control(
                bool power_up)
 {
        struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hwseq = ctx->dc->hwseq;
        struct bp_transmitter_control cntl = { 0 };
        enum bp_result bp_result;
 
@@ -821,7 +796,11 @@ void dce110_edp_power_control(
                return;
        }
 
-       if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
+       if (!link->panel_cntl)
+               return;
+
+       if (power_up !=
+               link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
                /* Send VBIOS command to prompt eDP panel power */
                if (power_up) {
                        unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -892,7 +871,6 @@ void dce110_edp_backlight_control(
                bool enable)
 {
        struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
        struct bp_transmitter_control cntl = { 0 };
 
        if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -901,7 +879,8 @@ void dce110_edp_backlight_control(
                return;
        }
 
-       if (enable && hws->funcs.is_panel_backlight_on(link)) {
+       if (enable && link->panel_cntl &&
+               link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) {
                DC_LOG_HW_RESUME_S3(
                                "%s: panel already powered up. Do nothing.\n",
                                __func__);
@@ -1087,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
 
        if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                hws->funcs.edp_backlight_control(link, false);
-               dc_link_set_abm_disable(link);
+               link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
        }
 
        if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 
        pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
 
-       pipe_ctx->stream->link->psr_feature_enabled = false;
+       pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
 
        return DC_OK;
 }
@@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc,
                return false;
 
        /* PSR should not be enabled */
-       if (pipe_ctx->stream->link->psr_feature_enabled)
+       if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
                return false;
 
        /* Nothing to compress */
@@ -2376,6 +2355,7 @@ static void init_hw(struct dc *dc)
        struct abm *abm;
        struct dmcu *dmcu;
        struct dce_hwseq *hws = dc->hwseq;
+       uint32_t backlight = MAX_BACKLIGHT_LEVEL;
 
        bp = dc->ctx->dc_bios;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2422,12 +2402,17 @@ static void init_hw(struct dc *dc)
                audio->funcs->hw_init(audio);
        }
 
-       abm = dc->res_pool->abm;
-       if (abm != NULL) {
-               abm->funcs->init_backlight(abm);
-               abm->funcs->abm_init(abm);
+       for (i = 0; i < dc->link_count; i++) {
+               struct dc_link *link = dc->links[i];
+
+               if (link->panel_cntl)
+                       backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
        }
 
+       abm = dc->res_pool->abm;
+       if (abm != NULL)
+               abm->funcs->abm_init(abm, backlight);
+
        dmcu = dc->res_pool->dmcu;
        if (dmcu != NULL && abm != NULL)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@@ -2735,6 +2720,53 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
                                pipe_ctx->plane_res.xfm, attributes);
 }
 
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp)
+{
+       struct dc_link *link = pipe_ctx->stream->link;
+       struct dc  *dc = link->ctx->dc;
+       struct abm *abm = pipe_ctx->stream_res.abm;
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
+       /* DMCU -1 for all controller id values,
+        * therefore +1 here
+        */
+       uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
+
+       if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
+               return false;
+
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+       if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
+               panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
+       else
+               abm->funcs->set_backlight_level_pwm(
+                               abm,
+                               backlight_pwm_u16_16,
+                               frame_ramp,
+                               controller_id,
+                               link->panel_cntl->inst);
+
+       return true;
+}
+
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+       struct abm *abm = pipe_ctx->stream_res.abm;
+       struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+       if (abm)
+               abm->funcs->set_abm_immediate_disable(abm,
+                               pipe_ctx->stream->link->panel_cntl->inst);
+
+       if (panel_cntl)
+               panel_cntl->funcs->store_backlight_level(panel_cntl);
+}
+
 static const struct hw_sequencer_funcs dce110_funcs = {
        .program_gamut_remap = program_gamut_remap,
        .program_output_csc = program_output_csc,
@@ -2757,6 +2789,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .disable_plane = dce110_power_down_fe,
        .pipe_control_lock = dce_pipe_control_lock,
        .interdependent_update_lock = NULL,
+       .cursor_lock = dce_pipe_control_lock,
        .prepare_bandwidth = dce110_prepare_bandwidth,
        .optimize_bandwidth = dce110_optimize_bandwidth,
        .set_drr = set_drr,
@@ -2768,7 +2801,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dce110_set_cursor_position,
-       .set_cursor_attribute = dce110_set_cursor_attribute
+       .set_cursor_attribute = dce110_set_cursor_attribute,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dce110_private_funcs = {
@@ -2784,8 +2819,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
        .disable_stream_gating = NULL,
        .enable_stream_gating = NULL,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
index 34be166e8ff0eaf146b0b95cd414989d68d9e9cd..fe5326df00f7eb54b3d9e8981c73d5dd8213f437 100644 (file)
@@ -85,9 +85,10 @@ void dce110_edp_wait_for_hpd_ready(
                struct dc_link *link,
                bool power_up);
 
-bool dce110_is_panel_backlight_on(struct dc_link *link);
-
-bool dce110_is_panel_powered_on(struct dc_link *link);
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp);
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
 
 #endif /* __DC_HWSS_DCE110_H__ */
 
index 4245e1f818a3d0d84138e3edd33a7c4d4a9d887d..e096d2b95ef9d49233333e7cff52ccab43de4162 100644 (file)
@@ -679,8 +679,7 @@ void dce110_opp_v_set_csc_default(
        if (default_adjust->force_hw_default == false) {
                const struct out_csc_color_matrix *elm;
                /* currently parameter not in use */
-               enum grph_color_adjust_option option =
-                       GRPH_COLOR_MATRIX_HW_DEFAULT;
+               enum grph_color_adjust_option option;
                uint32_t i;
                /*
                 * HW default false we program locally defined matrix
index bf14e9ab040ce4083ee6ed9546b354d2de9b644a..9597fc79d7faf97e6d09cecc87fb41284638192d 100644 (file)
@@ -53,6 +53,7 @@
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
 #include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
 
 #define DC_LOGGER \
                dc->ctx->logger
@@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCE_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -673,6 +686,23 @@ static struct link_encoder *dce110_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct output_pixel_processor *dce110_opp_create(
        struct dc_context *ctx,
        uint32_t inst)
@@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
 static const struct resource_funcs dce110_res_pool_funcs = {
        .destroy = dce110_destroy_resource_pool,
        .link_enc_create = dce110_link_encoder_create,
+       .panel_cntl_create = dce110_panel_cntl_create,
        .validate_bandwidth = dce110_validate_bandwidth,
        .validate_plane = dce110_validate_plane,
        .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
index 700ad8b3e54b2910b16905cd46d0d8b0fe2f7391..4a7796de2ff532325679b668b123810bd40f7246 100644 (file)
@@ -51,6 +51,7 @@
 #include "dce/dce_dmcu.h"
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
                aux_regs(5)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define hpd_regs(id)\
 [id] = {\
        HPD_REG_LIST(id)\
@@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct input_pixel_processor *dce112_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
@@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce112_res_pool_funcs = {
        .destroy = dce112_destroy_resource_pool,
        .link_enc_create = dce112_link_encoder_create,
+       .panel_cntl_create = dce112_panel_cntl_create,
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce112_add_stream_to_ctx,
index 53ab88ef71f5ee25c0f4940ad7f8b1e1333a0251..9a9764cbd78d6c5d1d9e04c030df172cf0b10114 100644 (file)
@@ -44,6 +44,7 @@
 #include "dce/dce_clock_source.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "dce110/dce110_hw_sequencer.h"
 #include "dce120/dce120_hw_sequencer.h"
@@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCE12_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct input_pixel_processor *dce120_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
@@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce120_res_pool_funcs = {
        .destroy = dce120_destroy_resource_pool,
        .link_enc_create = dce120_link_encoder_create,
+       .panel_cntl_create = dce120_panel_cntl_create,
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce112_add_stream_to_ctx,
index 893261c81854b9eac7d1f0803329774169e44823..d2ceebdbdf51607bf4a74dd3d674c0cf5eba67a9 100644 (file)
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
 
-struct dce80_hw_seq_reg_offsets {
-       uint32_t crtc;
-};
-
-static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
-{
-       .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-}
-};
-
-#define HW_REG_CRTC(reg, id)\
-       (reg + reg_offsets[id].crtc)
-
 /*******************************************************************************
  * Private definitions
  ******************************************************************************/
index 2ad5c28c6e66caf1c08afe847a3a722f8d2cc1d1..a19be9de2df7d87a76e14db7792abdae35a82c18 100644 (file)
@@ -50,6 +50,7 @@
 #include "dce/dce_hwseq.h"
 #include "dce80/dce80_hw_sequencer.h"
 #include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define opp_regs(id)\
 [id] = {\
        OPP_DCE_80_REG_LIST(id),\
@@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dce80_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce80_res_pool_funcs = {
        .destroy = dce80_destroy_resource_pool,
        .link_enc_create = dce80_link_encoder_create,
+       .panel_cntl_create = dce80_panel_cntl_create,
        .validate_bandwidth = dce80_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
index 0e682b5aa3ebe0b6ffc700be5cfc0bec0d12a4df..7f8456b9988b243711e75136ff7fd6a0fd5f4ba0 100644 (file)
@@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
                struct scaler_data *scl_data,
                const struct scaling_taps *in_taps)
 {
-       uint32_t pixel_width;
-
-       if (scl_data->viewport.width > scl_data->recout.width)
-               pixel_width = scl_data->recout.width;
-       else
-               pixel_width = scl_data->viewport.width;
-
        /* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
        if (scl_data->format == PIXEL_FORMAT_FP16 &&
                dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
index 31b64733d693576b23e751616b5be1c8c34fed7e..319366ebb44fe4f8e1521c842e05e1cc5340aae1 100644 (file)
@@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position(
        int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
        int x_hotspot = pos->x_hotspot;
        int y_hotspot = pos->y_hotspot;
+       int cursor_height = (int)hubp->curs_attr.height;
+       int cursor_width = (int)hubp->curs_attr.width;
        uint32_t dst_x_offset;
        uint32_t cur_en = pos->enable ? 1 : 0;
 
@@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position(
        if (hubp->curs_attr.address.quad_part == 0)
                return;
 
+       // Rotated cursor width/height and hotspots tweaks for offset calculation
        if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
-               src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
-               y_hotspot = pos->x_hotspot;
-               x_hotspot = pos->y_hotspot;
+               swap(cursor_height, cursor_width);
+               if (param->rotation == ROTATION_ANGLE_90) {
+                       src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+                       src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+               }
+       } else if (param->rotation == ROTATION_ANGLE_180) {
+               src_x_offset = pos->x - param->viewport.x;
+               src_y_offset = pos->y - param->viewport.y;
        }
 
        if (param->mirror) {
@@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position(
        if (src_x_offset >= (int)param->viewport.width)
                cur_en = 0;  /* not visible beyond right edge*/
 
-       if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+       if (src_x_offset + cursor_width <= 0)
                cur_en = 0;  /* not visible beyond left edge*/
 
        if (src_y_offset >= (int)param->viewport.height)
                cur_en = 0;  /* not visible beyond bottom edge*/
 
-       if (src_y_offset + (int)hubp->curs_attr.height <= 0)
+       if (src_y_offset + cursor_height <= 0)
                cur_en = 0;  /* not visible beyond top edge*/
 
        if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
index b0357546471b22cf24d40a31480c1a8e09e7f84b..f36d1f57b846e1cca4835610cde3c241fc872710 100644 (file)
@@ -826,6 +826,14 @@ enum dc_status dcn10_enable_stream_timing(
        color_space = stream->output_color_space;
        color_space_to_black_color(dc, color_space, &black_color);
 
+       /*
+        * The way 420 is packed, 2 channels carry Y component, 1 channel
+        * alternate between Cb and Cr, so both channels need the pixel
+        * value for Y
+        */
+       if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               black_color.color_r_cr = black_color.color_g_y;
+
        if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
                pipe_ctx->stream_res.tg->funcs->set_blank_color(
                                pipe_ctx->stream_res.tg,
@@ -903,7 +911,7 @@ static void dcn10_reset_back_end_for_pipe(
        if (pipe_ctx->top_pipe == NULL) {
 
                if (pipe_ctx->stream_res.abm)
-                       pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+                       dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
@@ -1238,12 +1246,13 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 
 void dcn10_init_hw(struct dc *dc)
 {
-       int i;
+       int i, j;
        struct abm *abm = dc->res_pool->abm;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dce_hwseq *hws = dc->hwseq;
        struct dc_bios *dcb = dc->ctx->dc_bios;
        struct resource_pool *res_pool = dc->res_pool;
+       uint32_t backlight = MAX_BACKLIGHT_LEVEL;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
                dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -1333,17 +1342,28 @@ void dcn10_init_hw(struct dc *dc)
                                continue;
 
                        /*
-                        * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
-                        * which needs to read dpcd info with the help of aconnector.
-                        * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
-                        * cannot be read.
+                        * If any of the displays are lit up turn them off.
+                        * The reason is that some MST hubs cannot be turned off
+                        * completely until we tell them to do so.
+                        * If not turned off, then displays connected to MST hub
+                        * won't light up.
                         */
-                       if (dc->links[i]->priv) {
-                               /* if any of the displays are lit up turn them off */
-                               status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
-                                                               &dpcd_power_state, sizeof(dpcd_power_state));
-                               if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
-                                       dp_receiver_power_ctrl(dc->links[i], false);
+                       status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+                                                       &dpcd_power_state, sizeof(dpcd_power_state));
+                       if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+                               /* blank dp stream before power off receiver*/
+                               if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+                                       unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+                                       for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+                                               if (fe == dc->res_pool->stream_enc[j]->id) {
+                                                       dc->res_pool->stream_enc[j]->funcs->dp_blank(
+                                                                               dc->res_pool->stream_enc[j]);
+                                                       break;
+                                               }
+                                       }
+                               }
+                               dp_receiver_power_ctrl(dc->links[i], false);
                        }
                }
        }
@@ -1361,17 +1381,54 @@ void dcn10_init_hw(struct dc *dc)
                                        !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
        }
 
+       /* In headless boot cases, DIG may be turned
+        * on which causes HW/SW discrepancies.
+        * To avoid this, power down hardware on boot
+        * if DIG is turned on and seamless boot not enabled
+        */
+       if (dc->config.power_down_display_on_boot) {
+               struct dc_link *edp_link = get_edp_link(dc);
+
+               if (edp_link &&
+                               edp_link->link_enc->funcs->is_dig_enabled &&
+                               edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+                               dc->hwss.edp_backlight_control &&
+                               dc->hwss.power_down &&
+                               dc->hwss.edp_power_control) {
+                       dc->hwss.edp_backlight_control(edp_link, false);
+                       dc->hwss.power_down(dc);
+                       dc->hwss.edp_power_control(edp_link, false);
+               } else {
+                       for (i = 0; i < dc->link_count; i++) {
+                               struct dc_link *link = dc->links[i];
+
+                               if (link->link_enc->funcs->is_dig_enabled &&
+                                               link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+                                               dc->hwss.power_down) {
+                                       dc->hwss.power_down(dc);
+                                       break;
+                               }
+
+                       }
+               }
+       }
+
        for (i = 0; i < res_pool->audio_count; i++) {
                struct audio *audio = res_pool->audios[i];
 
                audio->funcs->hw_init(audio);
        }
 
-       if (abm != NULL) {
-               abm->funcs->init_backlight(abm);
-               abm->funcs->abm_init(abm);
+       for (i = 0; i < dc->link_count; i++) {
+               struct dc_link *link = dc->links[i];
+
+               if (link->panel_cntl)
+                       backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
        }
 
+       if (abm != NULL)
+               abm->funcs->abm_init(abm, backlight);
+
        if (dmcu != NULL && !dmcu->auto_load_dmcu)
                dmcu->funcs->dmcu_init(dmcu);
 
@@ -1625,6 +1682,16 @@ void dcn10_pipe_control_lock(
                hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+       /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
+       if (!pipe || pipe->top_pipe)
+               return;
+
+       dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+                       pipe->stream_res.opp->inst, lock);
+}
+
 static bool wait_for_reset_trigger_to_occur(
        struct dc_context *dc_ctx,
        struct timing_generator *tg)
@@ -2085,25 +2152,25 @@ void dcn10_get_surface_visual_confirm_color(
 
        switch (pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB8888:
-               /* set boarder color to red */
+               /* set border color to red */
                color->color_r_cr = color_value;
                break;
 
        case PIXEL_FORMAT_ARGB2101010:
-               /* set boarder color to blue */
+               /* set border color to blue */
                color->color_b_cb = color_value;
                break;
        case PIXEL_FORMAT_420BPP8:
-               /* set boarder color to green */
+               /* set border color to green */
                color->color_g_y = color_value;
                break;
        case PIXEL_FORMAT_420BPP10:
-               /* set boarder color to yellow */
+               /* set border color to yellow */
                color->color_g_y = color_value;
                color->color_r_cr = color_value;
                break;
        case PIXEL_FORMAT_FP16:
-               /* set boarder color to white */
+               /* set border color to white */
                color->color_r_cr = color_value;
                color->color_b_cb = color_value;
                color->color_g_y = color_value;
@@ -2128,25 +2195,25 @@ void dcn10_get_hdr_visual_confirm_color(
        switch (top_pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB2101010:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
-                       /* HDR10, ARGB2101010 - set boarder color to red */
+                       /* HDR10, ARGB2101010 - set border color to red */
                        color->color_r_cr = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
-                       /* FreeSync 2 ARGB2101010 - set boarder color to pink */
+                       /* FreeSync 2 ARGB2101010 - set border color to pink */
                        color->color_r_cr = color_value;
                        color->color_b_cb = color_value;
                }
                break;
        case PIXEL_FORMAT_FP16:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
-                       /* HDR10, FP16 - set boarder color to blue */
+                       /* HDR10, FP16 - set border color to blue */
                        color->color_b_cb = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
-                       /* FreeSync 2 HDR - set boarder color to green */
+                       /* FreeSync 2 HDR - set border color to green */
                        color->color_g_y = color_value;
                }
                break;
        default:
-               /* SDR - set boarder color to Gray */
+               /* SDR - set border color to Gray */
                color->color_r_cr = color_value/2;
                color->color_b_cb = color_value/2;
                color->color_g_y = color_value/2;
@@ -2195,6 +2262,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
                                &blnd_cfg.black_color);
        }
 
+       /*
+        * The way 420 is packed, 2 channels carry Y component, 1 channel
+        * alternate between Cb and Cr, so both channels need the pixel
+        * value for Y
+        */
+       if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
+
        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
        else
@@ -2431,12 +2506,12 @@ void dcn10_blank_pixel_data(
                if (stream_res->tg->funcs->set_blank)
                        stream_res->tg->funcs->set_blank(stream_res->tg, blank);
                if (stream_res->abm) {
-                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+                                       stream->link->panel_cntl->inst);
                        stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
                }
        } else if (blank) {
-               if (stream_res->abm)
-                       stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
                if (stream_res->tg->funcs->set_blank)
                        stream_res->tg->funcs->set_blank(stream_res->tg, blank);
        }
index 16a50e05ffbfaa13ba23121fe5da7443632241f1..af51424315d5ca9f4c38b0102c22a7a9c20e6c4e 100644 (file)
@@ -49,6 +49,7 @@ void dcn10_pipe_control_lock(
        struct dc *dc,
        struct pipe_ctx *pipe,
        bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 void dcn10_blank_pixel_data(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
index dd02d3983695aba3f6b91c82637edda27a76f1e2..897a3d25685a2ce985fea97b7369c9fa31981f99 100644 (file)
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_audio_stream = dce110_disable_audio_stream,
        .disable_plane = dcn10_disable_plane,
        .pipe_control_lock = dcn10_pipe_control_lock,
+       .cursor_lock = dcn10_cursor_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
        .prepare_bandwidth = dcn10_prepare_bandwidth,
        .optimize_bandwidth = dcn10_optimize_bandwidth,
@@ -71,6 +72,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_clock = dcn10_set_clock,
        .get_clock = dcn10_get_clock,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -87,8 +90,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
        .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn10_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = NULL,
        .enable_stream_gating = NULL,
        .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
index d3617d6785a7edafa8d28314d67854295e0ca0b9..7fd385be3f3def5756ef9fd88e6e5c66f720cbb4 100644 (file)
@@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
        .is_dig_enabled = dcn10_is_dig_enabled,
        .get_dig_frontend = dcn10_get_dig_frontend,
        .get_dig_mode = dcn10_get_dig_mode,
-       .destroy = dcn10_link_encoder_destroy
+       .destroy = dcn10_link_encoder_destroy,
+       .get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
 };
 
 static enum bp_result link_transmitter_control(
@@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
                        DC_HPD_EN, 0);
 }
 
-
 #define AUX_REG(reg)\
        (enc10->aux_regs->reg)
 
@@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode(
        return SIGNAL_TYPE_NONE;
 }
 
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+       /* Higher link settings based on feature supported */
+       if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+       if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+       *link_settings = max_link_cap;
+}
index 762109174fb879650a32df771596410d43d2f564..68395bcc24fddf93b1715ce783c6b7b61de72610 100644 (file)
@@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
 
 enum signal_type dcn10_get_dig_mode(
        struct link_encoder *enc);
+
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
 #endif /* __DC_LINK_ENCODER__DCN10_H__ */
index 04f863499cfb75352ef0a2b6a69fe165f645dabd..3fcd408e9103250933044ff3721de622c7d5247c 100644 (file)
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
        REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
 
+       /* Configure VUPDATE lock set for this MPCC to map to the OPP */
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
        /* update mpc tree mux setting */
        if (tree->opp_list == insert_above_mpcc) {
                /* insert the toppest mpcc */
@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                /* mark this mpcc as not in use */
                mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
        }
 }
 
@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
        }
@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
        REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
        REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
        mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
 
@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
                        MPCC_BUSY, &s->busy);
 }
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+       struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+       REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
 static const struct mpc_funcs dcn10_mpc_funcs = {
        .read_mpcc_state = mpc1_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
        .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
        .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
        .update_blending = mpc1_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .set_denorm = NULL,
        .set_denorm_clamp = NULL,
        .set_output_csc = NULL,
index 962a68e322ee24b5015e3f37ceb9beec600440ab..66a4719c22a0c15a4fe4f2c9696ae16af5fe29d6 100644 (file)
        SRII(MPCC_BG_G_Y, MPCC, inst),\
        SRII(MPCC_BG_R_CR, MPCC, inst),\
        SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_SM_CONTROL, MPCC, inst)
+       SRII(MPCC_SM_CONTROL, MPCC, inst),\
+       SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
 
 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
-       SRII(MUX, MPC_OUT, inst)
+       SRII(MUX, MPC_OUT, inst),\
+       VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
 
 #define MPC_COMMON_REG_VARIABLE_LIST \
        uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@@ -55,7 +56,9 @@
        uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
        uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
        uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
-       uint32_t MUX[MAX_OPP];
+       uint32_t MUX[MAX_OPP]; \
+       uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+       uint32_t CUR[MAX_OPP];
 
 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
        SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@@ -78,7 +81,8 @@
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
-       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+       SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
 
 #define MPC_REG_FIELD_LIST(type) \
        type MPCC_TOP_SEL;\
        type MPCC_SM_FIELD_ALT;\
        type MPCC_SM_FORCE_NEXT_FRAME_POL;\
        type MPCC_SM_FORCE_NEXT_TOP_POL;\
-       type MPC_OUT_MUX;
+       type MPC_OUT_MUX;\
+       type MPCC_UPDATE_LOCK_SEL;\
+       type CUR_VUPDATE_LOCK_SET;
 
 struct dcn_mpc_registers {
        MPC_COMMON_REG_VARIABLE_LIST
@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
                int mpcc_inst,
                struct mpcc_state *s);
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
 #endif
index 17d96ec6acd8fc8d3ba3e17875d7d6c57c0f7190..ec0ab42becbac1136f4e9358e687abc632bc8196 100644 (file)
@@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc,
        uint32_t asic_blank_end;
        uint32_t v_init;
        uint32_t v_fp2 = 0;
+       int32_t vertical_line_start;
 
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc,
                        patched_crtc_timing.v_border_top;
 
        /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
-       if (optc1->vstartup_start > asic_blank_end)
-               v_fp2 = optc1->vstartup_start - asic_blank_end;
+       vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+       if (vertical_line_start < 0)
+               v_fp2 = -vertical_line_start;
 
        /* Interlace */
        if (REG(OTG_INTERLACE_CONTROL)) {
index 9a459a8fe8a055802b3ff7ab550a619938c5eefa..8d1e52fb0393918ff2fd39dc0993484d43c38a02 100644 (file)
@@ -158,6 +158,7 @@ struct dcn_optc_registers {
        uint32_t OTG_GSL_WINDOW_Y;
        uint32_t OTG_VUPDATE_KEEPOUT;
        uint32_t OTG_CRC_CNTL;
+       uint32_t OTG_CRC_CNTL2;
        uint32_t OTG_CRC0_DATA_RG;
        uint32_t OTG_CRC0_DATA_B;
        uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
@@ -475,7 +476,11 @@ struct dcn_optc_registers {
        type OPTC_DSC_SLICE_WIDTH;\
        type OPTC_SEGMENT_WIDTH;\
        type OPTC_DWB0_SOURCE_SELECT;\
-       type OPTC_DWB1_SOURCE_SELECT;
+       type OPTC_DWB1_SOURCE_SELECT;\
+       type OTG_CRC_DSC_MODE;\
+       type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+       type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+       type OTG_CRC_DATA_FORMAT;
 
 
 
index 07265ca7d28cc083eebaaa2381cf3413be78be3e..17d5cb422025e13dfb237d2ef6979364c2b10cde 100644 (file)
@@ -51,6 +51,7 @@
 #include "dce112/dce112_resource.h"
 #include "dcn10_hubp.h"
 #include "dcn10_hubbub.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "soc15_hw_ip.h"
 #include "vega10_ip_offset.h"
@@ -181,6 +182,14 @@ enum dcn10_clk_src_array_id {
        .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+       .field_name = reg_name ## __ ## bitfield ## post_fix
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF_BASE__INST0_SEG ## seg
@@ -321,6 +330,18 @@ static const struct dcn10_link_enc_mask le_mask = {
                LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCN10_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -419,11 +440,13 @@ static const struct dcn_mpc_registers mpc_regs = {
 };
 
 static const struct dcn_mpc_shift mpc_shift = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
 };
 
 static const struct dcn_mpc_mask mpc_mask = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
 };
 
 #define tg_regs(id)\
@@ -807,6 +830,23 @@ struct link_encoder *dcn10_link_encoder_create(
        return &enc10->base;
 }
 
+static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dcn10_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -1081,24 +1121,6 @@ static enum dc_status build_mapped_resource(
 {
        struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 
-       /*TODO Seems unneeded anymore */
-       /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                                todo: shouldn't have to copy missing parameter here
-                               resource_build_bit_depth_reduction_params(stream,
-                                               &stream->bit_depth_params);
-                               stream->clamping.pixel_encoding =
-                                               stream->timing.pixel_encoding;
-
-                               resource_build_bit_depth_reduction_params(stream,
-                                                               &stream->bit_depth_params);
-                               build_clamping_params(stream);
-
-                               continue;
-                       }
-               }
-       */
-
        if (!pipe_ctx)
                return DC_ERROR_UNEXPECTED;
 
@@ -1291,6 +1313,7 @@ static const struct dc_cap_funcs cap_funcs = {
 static const struct resource_funcs dcn10_res_pool_funcs = {
        .destroy = dcn10_destroy_resource_pool,
        .link_enc_create = dcn10_link_encoder_create,
+       .panel_cntl_create = dcn10_panel_cntl_create,
        .validate_bandwidth = dcn_validate_bandwidth,
        .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
        .validate_plane = dcn10_validate_plane,
@@ -1353,6 +1376,40 @@ static bool dcn10_resource_construct(
        /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
        dc->caps.force_dp_tps4_for_cp2520 = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 1;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 0;
+       dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 1;
+
+       /* no post-blend color operations */
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 0;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 0;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else
index 7eba9333c3285dc9420b32cf4f530888c93b2794..07b2f9399671dbca108cbb021d8c7cc988290739 100644 (file)
@@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
 {
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       uint32_t speakers = 0;
        uint32_t channels = 0;
 
        ASSERT(audio_info);
@@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
                /* This should not happen.it does so we don't get BSOD*/
                return;
 
-       speakers = audio_info->flags.info.ALLSPEAKERS;
        channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
 
        /* setup the audio stream source select (audio -> dig mapping) */
index 22f421e82733b04fa91ca859d5d8ab00646e4f4c..da5333d165ace594960bff5a8a667ea943b4256d 100644 (file)
@@ -961,8 +961,7 @@ void dcn20_blank_pixel_data(
        width = width / odm_cnt;
 
        if (blank) {
-               if (stream_res->abm)
-                       stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
                        test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@@ -997,7 +996,8 @@ void dcn20_blank_pixel_data(
 
        if (!blank)
                if (stream_res->abm) {
-                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+                                       stream->link->panel_cntl->inst);
                        stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
                }
 }
@@ -1478,8 +1478,11 @@ static void dcn20_program_pipe(
        if (pipe_ctx->update_flags.bits.odm)
                hws->funcs.update_odm(dc, context, pipe_ctx);
 
-       if (pipe_ctx->update_flags.bits.enable)
+       if (pipe_ctx->update_flags.bits.enable) {
                dcn20_enable_plane(dc, pipe_ctx, context);
+               if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+                       dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+       }
 
        if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
                dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2037,8 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
         */
        if (pipe_ctx->top_pipe == NULL) {
 
-               if (pipe_ctx->stream_res.abm)
-                       pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
@@ -2171,6 +2173,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         */
        mpcc_id = hubp->inst;
 
+       /* If there is no full update, don't need to touch MPC tree*/
+       if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+               !pipe_ctx->update_flags.bits.mpcc) {
+               mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+               return;
+       }
+
        /* check if this MPCC is already being used */
        new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
        /* remove MPCC if being used */
@@ -2294,7 +2303,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
 
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-       REG_WRITE(REFCLK_CNTL, 0);
+       if (REG(REFCLK_CNTL))
+               REG_WRITE(REFCLK_CNTL, 0);
        //
 
 
index 1e73357eda340acace5542a64e6045d6fb65b791..a8bcd747d7ba2293597278089f5d4542cff0a80d 100644 (file)
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
@@ -82,6 +83,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .init_vm_ctx = dcn20_init_vm_ctx,
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -97,8 +100,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
        .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn20_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = dcn20_disable_stream_gating,
        .enable_stream_gating = dcn20_enable_stream_gating,
        .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
index e4ac73035c84a4522e2781ee44ac176157d2b46b..8d209dae66e6ae8a8130edfd6da07424346d6a19 100644 (file)
 #define IND_REG(index) \
        (enc10->link_regs->index)
 
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
 
 static struct mpll_cfg dcn2_mpll_cfg[] = {
        // RBR
@@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
 
 }
 
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t is_in_usb_c_dp4_mode = 0;
+
+       dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+       /* in usb c dp2 mode, max lane count is 2 */
+       if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+               if (!is_in_usb_c_dp4_mode)
+                       link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+       }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t dp_alt_mode_disable = 0;
+       bool is_usb_c_alt_mode = false;
+
+       if (enc->features.flags.bits.DP_IS_USB_C) {
+               /* if value == 1 alt mode is disabled, otherwise it is enabled */
+               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+               is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+       }
+
+       return is_usb_c_alt_mode;
+}
+
 #define AUX_REG(reg)\
        (enc10->aux_regs->reg)
 
@@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
        .fec_is_active = enc2_fec_is_active,
        .get_dig_mode = dcn10_get_dig_mode,
        .get_dig_frontend = dcn10_get_dig_frontend,
+       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
 };
 
 void dcn20_link_encoder_construct(
index 8cab8107fd94c2094610685a1e2bd6db0f15c027..284a1ee4d249ef1bfe15326bf01766f1d9a2d121 100644 (file)
@@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output(
        const struct dc_link_settings *link_settings,
        enum clock_source_id clock_source);
 
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 void dcn20_link_encoder_construct(
        struct dcn20_link_encoder *enc20,
        const struct encoder_init_data *init_data,
index de9c857ab3e97abf9bd8ca0e53896a391b46e55f..99cc095dc33c7e49777710e137c535d5cf088d9b 100644 (file)
@@ -452,7 +452,7 @@ void mpc2_set_output_gamma(
                next_mode = LUT_RAM_A;
 
        mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
-       mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+       mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
 
        if (next_mode == LUT_RAM_A)
                mpc2_program_luta(mpc, mpcc_id, params);
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
        .mpc_init = mpc1_mpc_init,
        .mpc_init_single_inst = mpc1_mpc_init_single_inst,
        .update_blending = mpc2_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
        .wait_for_idle = mpc2_assert_idle_mpcc,
        .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
index c78fd5123497b6f14a4b043aba3c01aec9ba4297..496658f420dbdb6cb167059bc3a84f48023b262c 100644 (file)
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+       SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
 
 /*
  *     DCN2 MPC_OCSC debug status register:
index d875b0c38fded4928c81e2173ba3b425f5e54963..8c16967fe01807c96eacb37576690a1070a1a2c6 100644 (file)
@@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
                        OTG_TRIGA_MANUAL_TRIG, 1);
 }
 
+bool optc2_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_SET_2(OTG_CRC_CNTL2, 0,
+                       OTG_CRC_DSC_MODE, params->dsc_mode,
+                       OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+       return optc1_configure_crc(optc, params);
+}
+
 static struct timing_generator_funcs dcn20_tg_funcs = {
                .validate_timing = optc1_validate_timing,
                .program_timing = optc1_program_timing,
@@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
                .clear_optc_underflow = optc1_clear_optc_underflow,
                .setup_global_swap_lock = NULL,
                .get_crc = optc1_get_crc,
-               .configure_crc = optc1_configure_crc,
+               .configure_crc = optc2_configure_crc,
                .set_dsc_config = optc2_set_dsc_config,
                .set_dwb_source = optc2_set_dwb_source,
                .set_odm_bypass = optc2_set_odm_bypass,
index 239cc40ae474be3765e7bbd54c9766a52b2f9712..e0a0a8a8e2c606214f72fc32cc6e8c2ec6bfc625 100644 (file)
@@ -36,6 +36,7 @@
        SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
        SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
        SRI(OTG_DSC_START_POSITION, OTG, inst),\
+       SRI(OTG_CRC_CNTL2, OTG, inst),\
        SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
        SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
        SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
        SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
 void optc2_setup_manual_trigger(struct timing_generator *optc);
 void optc2_program_manual_trigger(struct timing_generator *optc);
 bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params);
 #endif /* __DC_OPTC_DCN20_H__ */
index 5cdbba0cd87316a7df2611b434f885412e7f4fd4..778e2e8fd2c6bfd61d10f52f4349dc7b598eadb5 100644 (file)
@@ -61,6 +61,7 @@
 #include "dcn20_dccg.h"
 #include "dcn20_vmid.h"
 #include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "navi10_ip_offset.h"
 
@@ -508,6 +509,10 @@ enum dcn20_clk_src_array_id {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIO_BASE__INST0_SEG ## seg
@@ -687,6 +692,18 @@ static const struct dcn10_link_enc_mask le_mask = {
        DPCS_DCN2_MASK_SH_LIST(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define ipp_regs(id)\
 [id] = {\
        IPP_REG_LIST_DCN20(id),\
@@ -1289,6 +1306,23 @@ struct link_encoder *dcn20_link_encoder_create(
        return &enc20->enc10.base;
 }
 
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dcn20_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -1619,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
        enum dc_status status = DC_OK;
        struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 
-       /*TODO Seems unneeded anymore */
-       /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                                todo: shouldn't have to copy missing parameter here
-                               resource_build_bit_depth_reduction_params(stream,
-                                               &stream->bit_depth_params);
-                               stream->clamping.pixel_encoding =
-                                               stream->timing.pixel_encoding;
-
-                               resource_build_bit_depth_reduction_params(stream,
-                                                               &stream->bit_depth_params);
-                               build_clamping_params(stream);
-
-                               continue;
-                       }
-               }
-       */
-
        if (!pipe_ctx)
                return DC_ERROR_UNEXPECTED;
 
@@ -1935,8 +1951,6 @@ void dcn20_split_stream_for_mpc(
        secondary_pipe->top_pipe = primary_pipe;
 
        ASSERT(primary_pipe->plane_state);
-       resource_build_scaling_params(primary_pipe);
-       resource_build_scaling_params(secondary_pipe);
 }
 
 void dcn20_populate_dml_writeback_from_context(
@@ -2212,12 +2226,12 @@ int dcn20_populate_dml_pipes_from_context(
                                        || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
                        pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
                                        || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
-                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
-                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
-                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
-                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
-                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
-                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
                        pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
                        pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
                        pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2562,11 +2576,32 @@ static void dcn20_merge_pipes_for_validate(
        }
 }
 
+int dcn20_find_previous_split_count(struct pipe_ctx *pipe)
+{
+       int previous_split = 1;
+       struct pipe_ctx *current_pipe = pipe;
+
+       while (current_pipe->bottom_pipe) {
+               if (current_pipe->plane_state != current_pipe->bottom_pipe->plane_state)
+                       break;
+               previous_split++;
+               current_pipe = current_pipe->bottom_pipe;
+       }
+       current_pipe = pipe;
+       while (current_pipe->top_pipe) {
+               if (current_pipe->plane_state != current_pipe->top_pipe->plane_state)
+                       break;
+               previous_split++;
+               current_pipe = current_pipe->top_pipe;
+       }
+       return previous_split;
+}
+
 int dcn20_validate_apply_pipe_split_flags(
                struct dc *dc,
                struct dc_state *context,
                int vlevel,
-               bool *split,
+               int *split,
                bool *merge)
 {
        int i, pipe_idx, vlevel_split;
@@ -2593,19 +2628,24 @@ int dcn20_validate_apply_pipe_split_flags(
 
        /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
        if (avoid_split) {
+               int max_mpc_comb = context->bw_ctx.dml.vba.maxMpcComb;
+
                for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                        if (!context->res_ctx.pipe_ctx[i].stream)
                                continue;
 
                        for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
-                               if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+                               if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+                                               context->bw_ctx.dml.vba.ModeSupport[vlevel][0])
                                        break;
                        /* Impossible to not split this pipe */
                        if (vlevel > context->bw_ctx.dml.soc.num_states)
                                vlevel = vlevel_split;
+                       else
+                               max_mpc_comb = 0;
                        pipe_idx++;
                }
-               context->bw_ctx.dml.vba.maxMpcComb = 0;
+               context->bw_ctx.dml.vba.maxMpcComb = max_mpc_comb;
        }
 
        /* Split loop sets which pipe should be split based on dml outputs and dc flags */
@@ -2616,8 +2656,14 @@ int dcn20_validate_apply_pipe_split_flags(
                if (!context->res_ctx.pipe_ctx[i].stream)
                        continue;
 
-               if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
-                       split[i] = true;
+               if (force_split
+                               || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) {
+                       if (context->stream_count == 1 && plane_count == 1
+                                       && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4)
+                               split[i] = 4;
+                       else
+                               split[i] = 2;
+               }
                if ((pipe->stream->view_format ==
                                VIEW_3D_FORMAT_SIDE_BY_SIDE ||
                                pipe->stream->view_format ==
@@ -2626,9 +2672,9 @@ int dcn20_validate_apply_pipe_split_flags(
                                TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
                                 pipe->stream->timing.timing_3d_format ==
                                TIMING_3D_FORMAT_SIDE_BY_SIDE))
-                       split[i] = true;
+                       split[i] = 2;
                if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
-                       split[i] = true;
+                       split[i] = 2;
                        context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
                }
                context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
@@ -2636,39 +2682,58 @@ int dcn20_validate_apply_pipe_split_flags(
 
                if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
                        /*Already split odm pipe tree, don't try to split again*/
-                       split[i] = false;
-                       split[pipe->prev_odm_pipe->pipe_idx] = false;
+                       split[i] = 0;
+                       split[pipe->prev_odm_pipe->pipe_idx] = 0;
                } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
                                && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
-                       /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
-                       split[i] = false;
-                       split[pipe->top_pipe->pipe_idx] = false;
-               } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
-                       if (split[i] == false) {
+                       /*If 2 way split but can support 4 way split, then split each pipe again*/
+                       if (context->stream_count == 1 && plane_count == 1
+                                       && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4) {
+                               split[i] = 2;
+                       } else {
+                               split[i] = 0;
+                               split[pipe->top_pipe->pipe_idx] = 0;
+                       }
+               } else if (pipe->prev_odm_pipe || (dcn20_find_previous_split_count(pipe) == 2 && pipe->top_pipe)) {
+                       if (split[i] == 0) {
                                /*Exiting mpc/odm combine*/
                                merge[i] = true;
-                               if (pipe->prev_odm_pipe) {
-                                       ASSERT(0); /*should not actually happen yet*/
-                                       merge[pipe->prev_odm_pipe->pipe_idx] = true;
-                               } else
-                                       merge[pipe->top_pipe->pipe_idx] = true;
                        } else {
                                /*Transition from mpc combine to odm combine or vice versa*/
                                ASSERT(0); /*should not actually happen yet*/
-                               split[i] = true;
+                               split[i] = 2;
                                merge[i] = true;
                                if (pipe->prev_odm_pipe) {
-                                       split[pipe->prev_odm_pipe->pipe_idx] = true;
+                                       split[pipe->prev_odm_pipe->pipe_idx] = 2;
                                        merge[pipe->prev_odm_pipe->pipe_idx] = true;
                                } else {
-                                       split[pipe->top_pipe->pipe_idx] = true;
+                                       split[pipe->top_pipe->pipe_idx] = 2;
                                        merge[pipe->top_pipe->pipe_idx] = true;
                                }
                        }
+               } else if (dcn20_find_previous_split_count(pipe) == 3) {
+                       if (split[i] == 0 && !pipe->top_pipe) {
+                               merge[pipe->bottom_pipe->pipe_idx] = true;
+                               merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                       } else if (split[i] == 2 && !pipe->top_pipe) {
+                               merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                               split[i] = 0;
+                       }
+               } else if (dcn20_find_previous_split_count(pipe) == 4) {
+                       if (split[i] == 0 && !pipe->top_pipe) {
+                               merge[pipe->bottom_pipe->pipe_idx] = true;
+                               merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                               merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                       } else if (split[i] == 2 && !pipe->top_pipe) {
+                               merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                               merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
+                               split[i] = 0;
+                       }
                }
 
                /* Adjust dppclk when split is forced, do not bother with dispclk */
-               if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
+               if (split[i] != 0
+                               && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
                        context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
                pipe_idx++;
        }
@@ -2685,7 +2750,7 @@ bool dcn20_fast_validate_bw(
                int *vlevel_out)
 {
        bool out = false;
-       bool split[MAX_PIPES] = { false };
+       int split[MAX_PIPES] = { 0 };
        int pipe_cnt, i, pipe_idx, vlevel;
 
        ASSERT(pipes);
@@ -2745,7 +2810,7 @@ bool dcn20_fast_validate_bw(
                                && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
                        goto validate_fail;
 
-               if (split[i]) {
+               if (split[i] == 2) {
                        if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
                                /* pipe not split previously needs split */
                                hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@@ -2760,10 +2825,13 @@ bool dcn20_fast_validate_bw(
                                                        pipe, hsplit_pipe))
                                                goto validate_fail;
                                        dcn20_build_mapped_resource(dc, context, pipe->stream);
-                               } else
+                               } else {
                                        dcn20_split_stream_for_mpc(
-                                               &context->res_ctx, dc->res_pool,
-                                               pipe, hsplit_pipe);
+                                                       &context->res_ctx, dc->res_pool,
+                                                       pipe, hsplit_pipe);
+                                       if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
+                                               goto validate_fail;
+                               }
                                pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
                        }
                } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -3003,7 +3071,7 @@ void dcn20_calculate_dlg_params(
                                pipe_idx,
                                cstate_en,
                                context->bw_ctx.bw.dcn.clk.p_state_change_support,
-                               false, false, false);
+                               false, false, true);
 
                context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
                                &context->res_ctx.pipe_ctx[i].rq_regs,
@@ -3064,25 +3132,34 @@ validate_out:
        return out;
 }
 
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
-               bool fast_validate)
+/*
+ * This must be noinline to ensure anything that deals with FP registers
+ * is contained within this call; previously our compiling with hard-float
+ * would result in fp instructions being emitted outside of the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has no
+ * idea about what is wrapped and what is not
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6,
+ * ideally all fp-using code should be moved into its own file, only that
+ * should be compiled with hard-float, and all code exported from there
+ * should be strictly wrapped with DC_FP_START/END
+ */
+static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+               struct dc_state *context, bool fast_validate)
 {
        bool voltage_supported = false;
        bool full_pstate_supported = false;
        bool dummy_pstate_supported = false;
        double p_state_latency_us;
 
-       DC_FP_START();
        p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
        context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
                dc->debug.disable_dram_clock_change_vactive_support;
+       context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+               dc->debug.enable_dram_clock_change_one_display_vactive;
 
        if (fast_validate) {
-               voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
-
-               DC_FP_END();
-               return voltage_supported;
+               return dcn20_validate_bandwidth_internal(dc, context, true);
        }
 
        // Best case, we support full UCLK switch latency
@@ -3111,7 +3188,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 
 restore_dml_state:
        context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+       return voltage_supported;
+}
 
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+               bool fast_validate)
+{
+       bool voltage_supported = false;
+       DC_FP_START();
+       voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
        DC_FP_END();
        return voltage_supported;
 }
@@ -3170,8 +3255,6 @@ static struct dc_cap_funcs cap_funcs = {
 
 enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
-       enum dc_status result = DC_OK;
-
        enum surface_pixel_format surf_pix_format = plane_state->format;
        unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
 
@@ -3183,12 +3266,13 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
                swizzle = DC_SW_64KB_S;
 
        plane_state->tiling_info.gfx9.swizzle = swizzle;
-       return result;
+       return DC_OK;
 }
 
 static struct resource_funcs dcn20_res_pool_funcs = {
        .destroy = dcn20_destroy_resource_pool,
        .link_enc_create = dcn20_link_encoder_create,
+       .panel_cntl_create = dcn20_panel_cntl_create,
        .validate_bandwidth = dcn20_validate_bandwidth,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -3427,6 +3511,13 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
                bb->dram_clock_change_latency_us =
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
        }
+
+       if ((int)(bb->dummy_pstate_latency_us * 1000)
+                               != dc->bb_overrides.dummy_clock_change_latency_ns
+                       && dc->bb_overrides.dummy_clock_change_latency_ns) {
+               bb->dummy_pstate_latency_us =
+                               dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+       }
 }
 
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3662,9 +3753,42 @@ static bool dcn20_resource_construct(
        dc->caps.max_slave_planes = 1;
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
-       dc->caps.hw_3d_lut = true;
        dc->caps.extended_aux_timeout_support = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 0;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 1;
+       dc->caps.color.dpp.ogam_ram = 1;
+       // no OGAM ROM on DCN2, only MPC ROM
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 0;
+
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 1;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 1;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
                dc->debug = debug_defaults_drv;
        } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 9d5bff9455fd0f7f6425de0fbc9ea9b50b1e7ed1..d5448c9b0e15ea593a7b24fe7ce4f6e49bdd2d06 100644 (file)
@@ -119,11 +119,12 @@ void dcn20_set_mcif_arb_params(
                display_e2e_pipe_params_st *pipes,
                int pipe_cnt);
 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+int dcn20_find_previous_split_count(struct pipe_ctx *pipe);
 int dcn20_validate_apply_pipe_split_flags(
                struct dc *dc,
                struct dc_state *context,
                int vlevel,
-               bool *split,
+               int *split,
                bool *merge);
 void dcn20_release_dsc(struct resource_context *res_ctx,
                        const struct resource_pool *pool,
index d285ba622d6103c76d7c91ccbc0590dd4ad233e0..960a0716dde53a924e3a24eca0f81178f3ab02a5 100644 (file)
@@ -778,21 +778,28 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
 {
        struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
        struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
-       struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
-
-       PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
-       PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
-       PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
-       PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
-       PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
-       PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+       cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+       cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+       cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+       cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+       cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+       cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
 
        PERF_TRACE();  // TODO: remove after performance is stable.
-       dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        PERF_TRACE();  // TODO: remove after performance is stable.
        dc_dmub_srv_cmd_execute(dmcub);
        PERF_TRACE();  // TODO: remove after performance is stable.
index b9ff9767e08fd4680f5f8fb512b81dc5fb7936d8..e97dfaa656e93bf57be23501691932125fb2efa8 100644 (file)
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
@@ -85,11 +86,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .optimize_pwr_state = dcn21_optimize_pwr_state,
        .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
-       .set_cursor_position = dcn10_set_cursor_position,
-       .set_cursor_attribute = dcn10_set_cursor_attribute,
-       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
-       .optimize_pwr_state = dcn21_optimize_pwr_state,
-       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+       .power_down = dce110_power_down,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -105,8 +104,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
        .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn20_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = dcn20_disable_stream_gating,
        .enable_stream_gating = dcn20_enable_stream_gating,
        .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
index e45683ac871a2ba86d898bab5175ad10393db0cd..aa46c35b05a23b29052c22a85890b56e5f8f3643 100644 (file)
@@ -203,29 +203,6 @@ static bool update_cfg_data(
        return true;
 }
 
-void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
-       struct dc_link_settings *link_settings)
-{
-       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-       uint32_t value;
-
-       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
-
-       if (!value && link_settings->lane_count > LANE_COUNT_TWO)
-               link_settings->lane_count = LANE_COUNT_TWO;
-}
-
-bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
-{
-       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-       uint32_t value;
-
-       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
-
-       // if value == 1 alt mode is disabled, otherwise it is enabled
-       return !value;
-}
-
 bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
 {
        struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = {
        .fec_set_ready = enc2_fec_set_ready,
        .fec_is_active = enc2_fec_is_active,
        .get_dig_frontend = dcn10_get_dig_frontend,
-       .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
-       .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
 };
 
 void dcn21_link_encoder_construct(
index b25484aa8222f440f0c0910e7e52139adf10e90d..419cdde624f5eae34b8bf41de31cae1e96a6d06a 100644 (file)
@@ -61,6 +61,7 @@
 #include "dcn21_hubbub.h"
 #include "dcn10/dcn10_resource.h"
 #include "dce110/dce110_resource.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "dcn20/dcn20_dwb.h"
 #include "dcn20/dcn20_mmhubbub.h"
@@ -85,6 +86,7 @@
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
 #include "dce/dmub_psr.h"
+#include "dce/dmub_abm.h"
 
 #define SOC_BOUNDING_BOX_VALID false
 #define DC_LOGGER_INIT(logger)
@@ -284,7 +286,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .dram_channel_width_bytes = 4,
        .fabric_datapath_to_dcn_data_return_bytes = 32,
        .dcn_downspread_percent = 0.5,
-       .downspread_percent = 0.5,
+       .downspread_percent = 0.38,
        .dram_page_open_time_ns = 50.0,
        .dram_rw_turnaround_time_ns = 17.5,
        .dram_return_buffer_per_channel_bytes = 8192,
@@ -340,6 +342,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF0_BASE__INST0_SEG ## seg
@@ -991,9 +997,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
                pool->base.dp_clock_source = NULL;
        }
 
-
-       if (pool->base.abm != NULL)
-               dce_abm_destroy(&pool->base.abm);
+       if (pool->base.abm != NULL) {
+               if (pool->base.abm->ctx->dc->config.disable_dmcu)
+                       dmub_abm_destroy(&pool->base.abm);
+               else
+                       dce_abm_destroy(&pool->base.abm);
+       }
 
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
@@ -1374,64 +1383,50 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
        struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
        struct clk_limit_table *clk_table = &bw_params->clk_table;
-       unsigned int i, j, k;
-       int closest_clk_lvl;
+       struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+       unsigned int i, closest_clk_lvl;
+       int j;
 
        // Default clock levels are used for diags, which may lead to overclocking.
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+       if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
                dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
                dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
                dcn2_1_soc.num_chans = bw_params->num_channels;
 
-               /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
-               dcn2_1_soc.clock_limits[0].state = 0;
-               dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
-               dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
-               dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
-               dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
-               /*
-                * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
-                * as indicator
-                */
-
-               closest_clk_lvl = -1;
-               /* index currently being filled */
-               k = 1;
-               for (i = 1; i < clk_table->num_entries; i++) {
-                       /* loop backwards, skip duplicate state*/
-                       for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+               ASSERT(clk_table->num_entries);
+               for (i = 0; i < clk_table->num_entries; i++) {
+                       /* loop backwards*/
+                       for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
                                if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
                                        closest_clk_lvl = j;
                                        break;
                                }
                        }
 
-                       /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
-                       if (closest_clk_lvl != -1) {
-                               dcn2_1_soc.clock_limits[k].state = i;
-                               dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-                               dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-                               dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
-                               dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-                               dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-                               k++;
-                       }
+                       clock_limits[i].state = i;
+                       clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+                       clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+                       clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+                       clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+                       clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+                       clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+                       clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+                       clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+                       clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+                       clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+                       clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+               }
+               for (i = 0; i < clk_table->num_entries; i++)
+                       dcn2_1_soc.clock_limits[i] = clock_limits[i];
+               if (clk_table->num_entries) {
+                       dcn2_1_soc.num_states = clk_table->num_entries;
+                       /* duplicate last level */
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
                }
-               dcn2_1_soc.num_states = k;
        }
 
-       /* duplicate last level */
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
        dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
 
@@ -1602,6 +1597,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
        link_regs(4, E),
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define aux_regs(id)\
 [id] = {\
        DCN2_AUX_REG_LIST(id)\
@@ -1687,6 +1694,24 @@ static struct link_encoder *dcn21_link_encoder_create(
 
        return &enc21->enc10.base;
 }
+
+static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 #define CTX ctx
 
 #define REG(reg_name) \
@@ -1705,12 +1730,8 @@ static int dcn21_populate_dml_pipes_from_context(
 {
        uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
        int i;
-       struct resource_context *res_ctx = &context->res_ctx;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
 
-               if (!res_ctx->pipe_ctx[i].stream)
-                       continue;
+       for (i = 0; i < pipe_cnt; i++) {
 
                pipes[i].pipe.src.hostvm = 1;
                pipes[i].pipe.src.gpuvm = 1;
@@ -1735,6 +1756,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
 static struct resource_funcs dcn21_res_pool_funcs = {
        .destroy = dcn21_destroy_resource_pool,
        .link_enc_create = dcn21_link_encoder_create,
+       .panel_cntl_create = dcn21_panel_cntl_create,
        .validate_bandwidth = dcn21_validate_bandwidth,
        .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -1781,7 +1803,6 @@ static bool dcn21_resource_construct(
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.max_cursor_size = 256;
        dc->caps.dmdata_alloc_size = 2048;
-       dc->caps.hw_3d_lut = true;
 
        dc->caps.max_slave_planes = 1;
        dc->caps.post_blend_color_processing = true;
@@ -1790,6 +1811,40 @@ static bool dcn21_resource_construct(
        dc->caps.dmcub_support = true;
        dc->caps.is_apu = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 0;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 1;
+       dc->caps.color.dpp.ogam_ram = 1;
+       // no OGAM ROM on DCN2
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 0;
+
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 1;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 1;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
@@ -1842,17 +1897,19 @@ static bool dcn21_resource_construct(
                goto create_fail;
        }
 
-       pool->base.dmcu = dcn21_dmcu_create(ctx,
-                       &dmcu_regs,
-                       &dmcu_shift,
-                       &dmcu_mask);
-       if (pool->base.dmcu == NULL) {
-               dm_error("DC: failed to create dmcu!\n");
-               BREAK_TO_DEBUGGER();
-               goto create_fail;
+       if (!dc->config.disable_dmcu) {
+               pool->base.dmcu = dcn21_dmcu_create(ctx,
+                               &dmcu_regs,
+                               &dmcu_shift,
+                               &dmcu_mask);
+               if (pool->base.dmcu == NULL) {
+                       dm_error("DC: failed to create dmcu!\n");
+                       BREAK_TO_DEBUGGER();
+                       goto create_fail;
+               }
        }
 
-       if (dc->debug.disable_dmcu) {
+       if (dc->config.disable_dmcu) {
                pool->base.psr = dmub_psr_create(ctx);
 
                if (pool->base.psr == NULL) {
@@ -1862,15 +1919,16 @@ static bool dcn21_resource_construct(
                }
        }
 
-       pool->base.abm = dce_abm_create(ctx,
+       if (dc->config.disable_dmcu)
+               pool->base.abm = dmub_abm_create(ctx,
+                       &abm_regs,
+                       &abm_shift,
+                       &abm_mask);
+       else
+               pool->base.abm = dce_abm_create(ctx,
                        &abm_regs,
                        &abm_shift,
                        &abm_mask);
-       if (pool->base.abm == NULL) {
-               dm_error("DC: failed to create abm!\n");
-               BREAK_TO_DEBUGGER();
-               goto create_fail;
-       }
 
        pool->base.pp_smu = dcn21_pp_smu_create(ctx);
 
index 5bbbafacc72038c8e365727bc270220466f9cc81..80170f9721ce949330d72408b81c48575af13101 100644 (file)
@@ -2599,21 +2599,44 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                }
        }
 
+       {
+       float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+       int PlaneWithMinActiveDRAMClockChangeMargin = -1;
+
        mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
        for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
                                < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
                        mode_lib->vba.MinActiveDRAMClockChangeMargin =
                                        mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+                       if (mode_lib->vba.BlendingAndTiming[k] == k) {
+                               PlaneWithMinActiveDRAMClockChangeMargin = k;
+                       } else {
+                               for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+                                       if (mode_lib->vba.BlendingAndTiming[k] == j) {
+                                               PlaneWithMinActiveDRAMClockChangeMargin = j;
+                                       }
+                               }
+                       }
                }
        }
 
        mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
                        mode_lib->vba.MinActiveDRAMClockChangeMargin
                                        + mode_lib->vba.DRAMClockChangeLatency;
+       for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
+                               && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+                               && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+                                               < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+                       SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+                                       mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+               }
+       }
 
        if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
                        mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+               mode_lib->vba.DRAMClockChangeWatermark += 25;
 
                for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                        if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
@@ -2622,13 +2645,17 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                                        mode_lib->vba.MinTTUVBlank[k] += 25;
                        }
                }
-               mode_lib->vba.DRAMClockChangeWatermark += 25;
+
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else if (mode_lib->vba.DummyPStateCheck &&
                        mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else {
-               if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+               if ((mode_lib->vba.SynchronizedVBlank
+                               || mode_lib->vba.NumberOfActivePlanes == 1
+                               || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
+                                               mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
+                                       && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
                        mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
                        for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                                if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@@ -2640,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                        mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
                }
        }
+       }
        for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
                for (j = 0; j < 2; j++)
                        mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
index e6617c958bb8bfb26ef690facecec0432ae43fb2..a576eed94d9b0fe5ad8cfecbedaccf09c714945d 100644 (file)
@@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
        double TimeForFetchingRowInVBlankImmediateFlip;
        double ImmediateFlipBW;
        double HostVMInefficiencyFactor;
+       double VRatioClamped;
 
        if (GPUVMEnable == true && HostVMEnable == true) {
                HostVMInefficiencyFactor =
@@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
 
        *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
        *final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+       VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
        if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
                if (GPUVMEnable == true && DCCEnable != true) {
                        min_row_time = dml_min(
-                                       dpte_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / (VRatio / 2));
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
                } else if (GPUVMEnable != true && DCCEnable == true) {
                        min_row_time = dml_min(
-                                       meta_row_height * LineTime / VRatio,
-                                       meta_row_height_chroma * LineTime / (VRatio / 2));
+                                       meta_row_height * LineTime / VRatioClamped,
+                                       meta_row_height_chroma * LineTime / (VRatioClamped / 2));
                } else {
                        min_row_time = dml_min4(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / (VRatio / 2),
-                                       meta_row_height_chroma * LineTime / (VRatio / 2));
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       meta_row_height * LineTime / VRatioClamped,
+                                       dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
+                                       meta_row_height_chroma * LineTime / (VRatioClamped / 2));
                }
        } else {
                if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dpte_row_height * LineTime / VRatio;
+                       min_row_time = dpte_row_height * LineTime / VRatioClamped;
                } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = meta_row_height * LineTime / VRatio;
+                       min_row_time = meta_row_height * LineTime / VRatioClamped;
                } else {
                        min_row_time = dml_min(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio);
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       meta_row_height * LineTime / VRatioClamped);
                }
        }
 
@@ -5944,7 +5946,7 @@ static void CalculateMetaAndPTETimes(
                                                * PixelPTEReqHeightY[k];
                        }
                        dpte_groups_per_row_luma_ub = dml_ceil(
-                                       dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+                                       (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
                                        1);
                        time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
                                        / PixelClock[k] / dpte_groups_per_row_luma_ub;
@@ -5968,7 +5970,7 @@ static void CalculateMetaAndPTETimes(
                                                        * PixelPTEReqHeightC[k];
                                }
                                dpte_groups_per_row_chroma_ub = dml_ceil(
-                                               dpte_row_width_chroma_ub[k]
+                                               (float) dpte_row_width_chroma_ub[k]
                                                                / dpte_group_width_chroma,
                                                1);
                                time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
index a38baa73d4841af90d8ea5381ba4fc9ef7d22861..90a5fefef05b10566fc036e7beffde338005b59f 100644 (file)
@@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
        min_hratio_fact_l = 1.0;
        min_hratio_fact_c = 1.0;
 
-       if (htaps_l <= 1)
+       if (hratio_l <= 1)
                min_hratio_fact_l = 2.0;
        else if (htaps_l <= 6) {
                if ((hratio_l * 2.0) > 4.0)
@@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
 
        hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
 
-       if (htaps_c <= 1)
+       if (hratio_c <= 1)
                min_hratio_fact_c = 2.0;
        else if (htaps_c <= 6) {
                if ((hratio_c * 2.0) > 4.0)
@@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params(
        disp_dlg_regs->refcyc_per_pte_group_vblank_l =
                        (unsigned int) (dst_y_per_row_vblank * (double) htotal
                                        * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
-       ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+       if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+                       disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
+               disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
+       else
+               ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
 
        if (dual_plane) {
                disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
                                * (double) htotal * ref_freq_to_pix_freq
                                / (double) dpte_groups_per_row_ub_c);
-               ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+               if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+                               disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
+                       disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
+               else
+                       ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
                                < (unsigned int)dml_pow(2, 13));
        }
 
-       disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+       if (src->dcc)
+               disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
                        (unsigned int) (dst_y_per_row_vblank * (double) htotal
                                        * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+       else
+               disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
        ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
 
        disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
@@ -1522,8 +1533,8 @@ static void dml_rq_dlg_get_dlg_params(
 
        disp_dlg_regs->refcyc_per_vm_group_vblank   = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
        disp_dlg_regs->refcyc_per_vm_group_flip     = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
 
        // Clamp to max for now
        if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
index 687010c17324cc595eb70ae81136fa6ca0af14a7..439ffd04be34c69734b2f05657f6158cc0e1ea64 100644 (file)
@@ -118,9 +118,11 @@ struct _vcs_dpi_soc_bounding_box_st {
        double urgent_latency_adjustment_fabric_clock_component_us;
        double urgent_latency_adjustment_fabric_clock_reference_mhz;
        bool disable_dram_clock_change_vactive_support;
+       bool allow_dram_clock_one_display_vactive;
 };
 
 struct _vcs_dpi_ip_params_st {
+       bool use_min_dcfclk;
        bool gpuvm_enable;
        bool hostvm_enable;
        unsigned int gpuvm_max_page_table_levels;
index 6b525c52124c6aa3b93dd3ee3cb0665727a9f2ac..b19988f547218a0aaaa5b8ffbb72b032c073a6a9 100644 (file)
@@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
        mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
        mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
                        mode_lib->vba.DummyPStateCheck;
+       mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
 
        mode_lib->vba.Downspreading = soc->downspread_percent;
        mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes;   // new!
@@ -280,6 +281,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
        ip_params_st *ip = &mode_lib->vba.ip;
 
        // IP Parameters
+       mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
        mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
        mode_lib->vba.MaxNumOTG = ip->max_num_otg;
        mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
index 5d82fc5a7ed7258d4c34217272eb2a7d3cf27728..6a7b20927a6be4da9e5410fb90ff2ca770652a19 100644 (file)
@@ -898,6 +898,8 @@ struct vba_vars_st {
        bool dummystring[DC__NUM_DPP__MAX];
        double BPP;
        enum odm_combine_policy ODMCombinePolicy;
+       bool UseMinimumRequiredDCFCLK;
+       bool AllowDramClockChangeOneDisplayVactive;
 };
 
 bool CalculateMinAndMaxPrefetchMode(
index 87d682d25278ab4a8f5f41e9a586112299006f3c..0ea6662a1563215e5aadc309b0ed02467f711db5 100644 (file)
@@ -129,7 +129,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
 static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
 {
        switch (dpcd_throughput) {
-       case DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED:
+       case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
                *throughput = 0;
                break;
        case DP_DSC_THROUGHPUT_MODE_0_170:
index 6f730b5bfe425cb06c220d79acdc4a17769e9de8..5e384a8a83dc21713d1a8bde89b779826713fca4 100644 (file)
@@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = {
        .process_transaction = dp_11_process_transaction
 };
 
+static const struct protection_properties *get_protection_properties_by_signal(
+       struct dc_link *link,
+       enum signal_type st,
+       enum hdcp_version version)
+{
+       switch (version) {
+       case HDCP_VERSION_14:
+               switch (st) {
+               case SIGNAL_TYPE_DVI_SINGLE_LINK:
+               case SIGNAL_TYPE_DVI_DUAL_LINK:
+               case SIGNAL_TYPE_HDMI_TYPE_A:
+                       return &hdmi_14_protection;
+               case SIGNAL_TYPE_DISPLAY_PORT:
+                       if (link &&
+                               (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+                               link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
+                               return &non_supported_protection;
+                       }
+                       return &dp_11_protection;
+               case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               case SIGNAL_TYPE_EDP:
+                       return &dp_11_protection;
+               default:
+                       return &non_supported_protection;
+               }
+               break;
+       case HDCP_VERSION_22:
+               switch (st) {
+               case SIGNAL_TYPE_DVI_SINGLE_LINK:
+               case SIGNAL_TYPE_DVI_DUAL_LINK:
+               case SIGNAL_TYPE_HDMI_TYPE_A:
+                       return &hdmi_14_protection; //todo version2.2
+               case SIGNAL_TYPE_DISPLAY_PORT:
+               case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               case SIGNAL_TYPE_EDP:
+                       return &dp_11_protection;  //todo version2.2
+               default:
+                       return &non_supported_protection;
+               }
+               break;
+       default:
+               return &non_supported_protection;
+       }
+}
+
+enum hdcp_message_status dc_process_hdcp_msg(
+       enum signal_type signal,
+       struct dc_link *link,
+       struct hdcp_protection_message *message_info)
+{
+       enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
+       uint32_t i = 0;
+
+       const struct protection_properties *protection_props;
+
+       if (!message_info)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
+               message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       protection_props =
+               get_protection_properties_by_signal(
+                       link,
+                       signal,
+                       message_info->version);
+
+       if (!protection_props->supported)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       if (protection_props->process_transaction(
+               link,
+               message_info)) {
+               status = HDCP_MESSAGE_SUCCESS;
+       } else {
+               for (i = 0; i < message_info->max_retries; i++) {
+                       if (protection_props->process_transaction(
+                                               link,
+                                               message_info)) {
+                               status = HDCP_MESSAGE_SUCCESS;
+                               break;
+                       }
+               }
+       }
+
+       return status;
+}
+
index d523fc9547e705d66bf5322384f3a91e1eb78045..c7fd702a4a87db0e91141e8c602d9f303d66a246 100644 (file)
@@ -38,6 +38,7 @@
 #endif
 #include "dwb.h"
 #include "mcif_wb.h"
+#include "panel_cntl.h"
 
 #define MAX_CLOCK_SOURCES 7
 
@@ -92,6 +93,8 @@ struct clk_bw_params;
 struct resource_funcs {
        void (*destroy)(struct resource_pool **pool);
        void (*link_init)(struct dc_link *link);
+       struct panel_cntl*(*panel_cntl_create)(
+               const struct panel_cntl_init_data *panel_cntl_init_data);
        struct link_encoder *(*link_enc_create)(
                        const struct encoder_init_data *init);
        bool (*validate_bandwidth)(
index d607b3191954fc57b2598c8962d3980d1ed15a00..e8ce8c85adf1c48cf1aaf7683452f2482e8d9a87 100644 (file)
 
 #include "dm_services_types.h"
 
-struct abm_backlight_registers {
-       unsigned int BL_PWM_CNTL;
-       unsigned int BL_PWM_CNTL2;
-       unsigned int BL_PWM_PERIOD_CNTL;
-       unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
 struct abm {
        struct dc_context *ctx;
        const struct abm_funcs *funcs;
        bool dmcu_is_running;
-       /* registers setting needs to be saved and restored at InitBacklight */
-       struct abm_backlight_registers stored_backlight_registers;
 };
 
 struct abm_funcs {
-       void (*abm_init)(struct abm *abm);
+       void (*abm_init)(struct abm *abm, uint32_t back_light);
        bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
-       bool (*set_abm_immediate_disable)(struct abm *abm);
-       bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
-       bool (*init_backlight)(struct abm *abm);
+       bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
+       bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
 
        /* backlight_pwm_u16_16 is unsigned 32 bit,
         * 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@@ -56,10 +46,13 @@ struct abm_funcs {
                        unsigned int backlight_pwm_u16_16,
                        unsigned int frame_ramp,
                        unsigned int controller_id,
-                       bool use_smooth_brightness);
+                       unsigned int panel_inst);
 
        unsigned int (*get_current_backlight)(struct abm *abm);
        unsigned int (*get_target_backlight)(struct abm *abm);
+       bool (*init_abm_config)(struct abm *abm,
+                       const char *src,
+                       unsigned int bytes);
 };
 
 #endif
index f5dd0cc73c63a9116b03f996a126649891ba96f6..47a566d82d6e4b79890a14ee517ba5c0b5a47f51 100644 (file)
@@ -144,6 +144,8 @@ struct hubbub_funcs {
        void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
 
        void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+
+       void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
 };
 
 struct hubbub {
index 094afc4c817310332dd78521ee1ebe56e79094b1..50ee8aa7ec3b3317ababfea194699af9d9aaef48 100644 (file)
@@ -210,6 +210,22 @@ struct mpc_funcs {
                struct mpcc_blnd_cfg *blnd_cfg,
                int mpcc_id);
 
+       /*
+        * Lock cursor updates for the specified OPP.
+        * OPP defines the set of MPCC that are locked together for cursor.
+        *
+        * Parameters:
+        * [in]  mpc    - MPC context.
+        * [in]  opp_id - The OPP to lock cursor updates on.
+        * [in]  lock   - Lock/unlock the OPP.
+        *
+        * Return:  void
+        */
+       void (*cursor_lock)(
+                       struct mpc *mpc,
+                       int opp_id,
+                       bool lock);
+
        struct mpcc* (*get_mpcc_for_dpp)(
                        struct mpc_tree *tree,
                        int dpp_id);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
new file mode 100644 (file)
index 0000000..f9ab5ab
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * panel_cntl.h
+ *
+ *  Created on: Oct 6, 2015
+ *      Author: yonsun
+ */
+
+#ifndef DC_PANEL_CNTL_H_
+#define DC_PANEL_CNTL_H_
+
+#include "dc_types.h"
+
+#define MAX_BACKLIGHT_LEVEL 0xFFFF
+
+struct panel_cntl_backlight_registers {
+       unsigned int BL_PWM_CNTL;
+       unsigned int BL_PWM_CNTL2;
+       unsigned int BL_PWM_PERIOD_CNTL;
+       unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+struct panel_cntl_funcs {
+       void (*destroy)(struct panel_cntl **panel_cntl);
+       uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
+       bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
+       bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
+       void (*store_backlight_level)(struct panel_cntl *panel_cntl);
+       void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
+                       uint32_t backlight_pwm_u16_16);
+};
+
+struct panel_cntl_init_data {
+       struct dc_context *ctx;
+       uint32_t inst;
+};
+
+struct panel_cntl {
+       const struct panel_cntl_funcs *funcs;
+       struct dc_context *ctx;
+       uint32_t inst;
+       /* registers setting needs to be saved and restored at InitBacklight */
+       struct panel_cntl_backlight_registers stored_backlight_registers;
+};
+
+#endif /* DC_PANEL_CNTL_H_ */
index e5e7d94026fc6b7ed0c796ff508d7f601cbbcd32..f803191e3134ad5db9ffc2282b7934c0156a77fe 100644 (file)
@@ -117,6 +117,9 @@ struct crc_params {
 
        enum crc_selection selection;
 
+       uint8_t dsc_mode;
+       uint8_t odm_mode;
+
        bool continuous_mode;
        bool enable;
 };
index fecc80c47c267eb015572bf6df6a3708088e73ef..2947d1b15512910658a06a07792414bfab21fc25 100644 (file)
@@ -173,6 +173,8 @@ struct scaler_data {
        struct scaling_taps taps;
        struct rect viewport;
        struct rect viewport_c;
+       struct rect viewport_unadjusted;
+       struct rect viewport_c_unadjusted;
        struct rect recout;
        struct scaling_ratios ratios;
        struct scl_inits inits;
index d4c1fb242c6378a0b8439a7075eba0f288a89fbc..3b2ea9bdb62c2522457d6f3bf22085df5da40251 100644 (file)
@@ -75,9 +75,13 @@ struct hw_sequencer_funcs {
        void (*wait_for_mpcc_disconnect)(struct dc *dc,
                        struct resource_pool *res_pool,
                        struct pipe_ctx *pipe_ctx);
+       void (*edp_backlight_control)(
+                       struct dc_link *link,
+                       bool enable);
        void (*program_triplebuffer)(const struct dc *dc,
                struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
        void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+       void (*power_down)(struct dc *dc);
 
        /* Pipe Lock Related */
        void (*pipe_control_lock)(struct dc *dc,
@@ -86,6 +90,7 @@ struct hw_sequencer_funcs {
                        struct dc_state *context, bool lock);
        void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
                        bool flip_immediate);
+       void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 
        /* Timing Related */
        void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
@@ -187,6 +192,12 @@ struct hw_sequencer_funcs {
                        unsigned int bufSize, unsigned int mask);
        void (*clear_status_bits)(struct dc *dc, unsigned int mask);
 
+       bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
+                       uint32_t backlight_pwm_u16_16,
+                       uint32_t frame_ramp);
+
+       void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+
 
 };
 
index 52a26e6be066b4ff978237d0feb76fb1c33e8e12..36e906bb6bfc5eded324c0a0c5adb2ecb4d95722 100644 (file)
@@ -100,8 +100,6 @@ struct hwseq_private_funcs {
                        struct dc *dc);
        void (*edp_backlight_control)(struct dc_link *link,
                        bool enable);
-       bool (*is_panel_backlight_on)(struct dc_link *link);
-       bool (*is_panel_powered_on)(struct dc_link *link);
        void (*setup_vupdate_interrupt)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx);
        bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
index ca4c36c0c9bcf9b0db7b1e65f93c1ed4399d28fc..109c589eb97c2438d690aeb8b207ed04d7a68ffc 100644 (file)
@@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
                const struct resource_pool *pool,
                const struct pipe_ctx *primary_pipe);
 
-bool resource_is_stream_unchanged(
-       struct dc_state *old_context, struct dc_stream_state *stream);
-
 bool resource_validate_attach_surfaces(
                const struct dc_validation_set set[],
                int set_count,
index c34eba19860a32a9952f26ab094dd7266552c953..6d7bca562eec00cb2093edbf6d09e833a6382206 100644 (file)
 #define ASSERT(expr) ASSERT_CRITICAL(expr)
 
 #else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
 #endif
 
 #define BREAK_TO_DEBUGGER() ASSERT(0)
index 3464b2d5b89afa1d90dec9bb83ead082c1ba9986..348e9a600a728961c14d8ee02d392e63c9f38182 100644 (file)
@@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
        *enc = NULL;
 }
 
+static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
+               struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                               LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+       *link_settings = max_link_cap;
+}
 
 static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
        .validate_output_with_stream =
@@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
        .enable_dp_output = virtual_link_encoder_enable_dp_output,
        .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
        .disable_output = virtual_link_encoder_disable_output,
+       .get_max_link_cap = virtual_link_encoder_get_max_link_cap,
        .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
        .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
        .update_mst_stream_allocation_table =
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
new file mode 100644 (file)
index 0000000..26d94eb
--- /dev/null
@@ -0,0 +1,583 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_SRV_H_
+#define _DMUB_SRV_H_
+
+/**
+ * DOC: DMUB interface and operation
+ *
+ * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
+ * It delegates hardware initialization and command submission to the
+ * microcontroller. DMUB is the shortname for DMCUB.
+ *
+ * This interface is not thread-safe. Ensure that all access to the interface
+ * is properly synchronized by the caller.
+ *
+ * Initialization and usage of the DMUB service should be done in the
+ * steps given below:
+ *
+ * 1. dmub_srv_create()
+ * 2. dmub_srv_has_hw_support()
+ * 3. dmub_srv_calc_region_info()
+ * 4. dmub_srv_hw_init()
+ *
+ * The call to dmub_srv_create() is required to use the server.
+ *
+ * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
+ * are helpers to query cache window size and allocate framebuffer(s)
+ * for the cache windows.
+ *
+ * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
+ * for command submission. Commands can be queued via dmub_srv_cmd_queue()
+ * and executed via dmub_srv_cmd_execute().
+ *
+ * If the queue is full the dmub_srv_wait_for_idle() call can be used to
+ * wait until the queue has been cleared.
+ *
+ * Destroying the DMUB service can be done by calling dmub_srv_destroy().
+ * This does not clear DMUB hardware state, only software state.
+ *
+ * The interface is intended to be standalone and should not depend on any
+ * other component within DAL.
+ */
+
+#include "inc/dmub_types.h"
+#include "inc/dmub_cmd.h"
+#include "inc/dmub_gpint_cmd.h"
+#include "inc/dmub_cmd_dal.h"
+#include "inc/dmub_rb.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct dmub_srv;
+struct dmub_srv_common_regs;
+
+/* enum dmub_status - return code for dmcub functions */
+enum dmub_status {
+       DMUB_STATUS_OK = 0,
+       DMUB_STATUS_NO_CTX,
+       DMUB_STATUS_QUEUE_FULL,
+       DMUB_STATUS_TIMEOUT,
+       DMUB_STATUS_INVALID,
+};
+
+/* enum dmub_asic - dmub asic identifier */
+enum dmub_asic {
+       DMUB_ASIC_NONE = 0,
+       DMUB_ASIC_DCN20,
+       DMUB_ASIC_DCN21,
+       DMUB_ASIC_MAX,
+};
+
+/* enum dmub_window_id - dmub window identifier */
+enum dmub_window_id {
+       DMUB_WINDOW_0_INST_CONST = 0,
+       DMUB_WINDOW_1_STACK,
+       DMUB_WINDOW_2_BSS_DATA,
+       DMUB_WINDOW_3_VBIOS,
+       DMUB_WINDOW_4_MAILBOX,
+       DMUB_WINDOW_5_TRACEBUFF,
+       DMUB_WINDOW_6_FW_STATE,
+       DMUB_WINDOW_7_SCRATCH_MEM,
+       DMUB_WINDOW_TOTAL,
+};
+
+/**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+ * @top: top address for region
+ */
+struct dmub_region {
+       uint32_t base;
+       uint32_t top;
+};
+
+/**
+ * struct dmub_window - dmub hw cache window
+ * @offset: offset to the fb memory in gpu address space
+ * @region: region in uc address space for cache window
+ */
+struct dmub_window {
+       union dmub_addr offset;
+       struct dmub_region region;
+};
+
+/**
+ * struct dmub_fb - defines a dmub framebuffer memory region
+ * @cpu_addr: cpu virtual address for the region, NULL if invalid
+ * @gpu_addr: gpu virtual address for the region, zero if invalid
+ * @size: size of the region in bytes, zero if invalid
+ */
+struct dmub_fb {
+       void *cpu_addr;
+       uint64_t gpu_addr;
+       uint32_t size;
+};
+
+/**
+ * struct dmub_srv_region_params - params used for calculating dmub regions
+ * @inst_const_size: size of the fw inst const section
+ * @bss_data_size: size of the fw bss data section
+ * @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
+ */
+struct dmub_srv_region_params {
+       uint32_t inst_const_size;
+       uint32_t bss_data_size;
+       uint32_t vbios_size;
+       const uint8_t *fw_inst_const;
+       const uint8_t *fw_bss_data;
+};
+
+/**
+ * struct dmub_srv_region_info - output region info from the dmub service
+ * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
+ * @num_regions: number of regions used by the dmub service
+ * @regions: region info
+ *
+ * The regions are aligned such that they can be all placed within the
+ * same framebuffer but they can also be placed into different framebuffers.
+ *
+ * The size of each region can be calculated by the caller:
+ * size = reg.top - reg.base
+ *
+ * Care must be taken when performing custom allocations to ensure that each
+ * region base address is 256 byte aligned.
+ */
+struct dmub_srv_region_info {
+       uint32_t fb_size;
+       uint8_t num_regions;
+       struct dmub_region regions[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+ * @cpu_addr: base cpu address for the framebuffer
+ * @gpu_addr: base gpu virtual address for the framebuffer
+ */
+struct dmub_srv_fb_params {
+       const struct dmub_srv_region_info *region_info;
+       void *cpu_addr;
+       uint64_t gpu_addr;
+};
+
+/**
+ * struct dmub_srv_fb_info - output fb info from the dmub service
+ * @num_fb: number of required dmub framebuffers
+ * @fb: fb data for each region
+ *
+ * Output from the dmub service helper that can be used by the
+ * driver to prepare dmub_fb that can be passed into the dmub
+ * hw init service.
+ *
+ * Assumes that all regions are within the same framebuffer
+ * and have been setup according to the region_info generated
+ * by the dmub service.
+ */
+struct dmub_srv_fb_info {
+       uint8_t num_fb;
+       struct dmub_fb fb[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_base_funcs - Driver specific base callbacks
+ */
+struct dmub_srv_base_funcs {
+       /**
+        * @reg_read:
+        *
+        * Hook for reading a register.
+        *
+        * Return: The 32-bit register value from the given address.
+        */
+       uint32_t (*reg_read)(void *ctx, uint32_t address);
+
+       /**
+        * @reg_write:
+        *
+        * Hook for writing a value to the register specified by address.
+        */
+       void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
+};
+
+/**
+ * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
+ */
+struct dmub_srv_hw_funcs {
+       /* private: internal use only */
+
+       void (*init)(struct dmub_srv *dmub);
+
+       void (*reset)(struct dmub_srv *dmub);
+
+       void (*reset_release)(struct dmub_srv *dmub);
+
+       void (*backdoor_load)(struct dmub_srv *dmub,
+                             const struct dmub_window *cw0,
+                             const struct dmub_window *cw1);
+
+       void (*setup_windows)(struct dmub_srv *dmub,
+                             const struct dmub_window *cw2,
+                             const struct dmub_window *cw3,
+                             const struct dmub_window *cw4,
+                             const struct dmub_window *cw5,
+                             const struct dmub_window *cw6);
+
+       void (*setup_mailbox)(struct dmub_srv *dmub,
+                             const struct dmub_region *inbox1);
+
+       uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+       void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+       bool (*is_supported)(struct dmub_srv *dmub);
+
+       bool (*is_hw_init)(struct dmub_srv *dmub);
+
+       bool (*is_phy_init)(struct dmub_srv *dmub);
+
+       bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+       void (*set_gpint)(struct dmub_srv *dmub,
+                         union dmub_gpint_data_register reg);
+
+       bool (*is_gpint_acked)(struct dmub_srv *dmub,
+                              union dmub_gpint_data_register reg);
+
+       uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
+};
+
+/**
+ * struct dmub_srv_create_params - params for dmub service creation
+ * @base_funcs: driver supplied base routines
+ * @hw_funcs: optional overrides for hw funcs
+ * @user_ctx: context data for callback funcs
+ * @asic: driver supplied asic
+ * @is_virtual: false for hw support only
+ */
+struct dmub_srv_create_params {
+       struct dmub_srv_base_funcs funcs;
+       struct dmub_srv_hw_funcs *hw_funcs;
+       void *user_ctx;
+       enum dmub_asic asic;
+       bool is_virtual;
+};
+
+/*
+ * struct dmub_srv_hw_params - params for dmub hardware initialization
+ * @fb: framebuffer info for each region
+ * @fb_base: base of the framebuffer aperture
+ * @fb_offset: offset of the framebuffer aperture
+ * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
+ */
+struct dmub_srv_hw_params {
+       struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
+       uint64_t fb_base;
+       uint64_t fb_offset;
+       uint32_t psp_version;
+       bool load_inst_const;
+};
+
+/**
+ * struct dmub_srv - software state for dmcub
+ * @asic: dmub asic identifier
+ * @user_ctx: user provided context for the dmub_srv
+ * @is_virtual: false if hardware support only
+ * @fw_state: dmub firmware state pointer
+ */
+struct dmub_srv {
+       enum dmub_asic asic;
+       void *user_ctx;
+       bool is_virtual;
+       struct dmub_fb scratch_mem_fb;
+       volatile const struct dmub_fw_state *fw_state;
+
+       /* private: internal use only */
+       const struct dmub_srv_common_regs *regs;
+
+       struct dmub_srv_base_funcs funcs;
+       struct dmub_srv_hw_funcs hw_funcs;
+       struct dmub_rb inbox1_rb;
+
+       bool sw_init;
+       bool hw_init;
+
+       uint64_t fb_base;
+       uint64_t fb_offset;
+       uint32_t psp_version;
+};
+
+/**
+ * dmub_srv_create() - creates the DMUB service.
+ * @dmub: the dmub service
+ * @params: creation parameters for the service
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+                                const struct dmub_srv_create_params *params);
+
+/**
+ * dmub_srv_destroy() - destroys the DMUB service.
+ * @dmub: the dmub service
+ */
+void dmub_srv_destroy(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_calc_region_info() - retrieves region info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate region locations
+ * @out: the output region info from dmub
+ *
+ * Calculates the base and top address for all relevant dmub regions
+ * using the parameters given (if any).
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+                         const struct dmub_srv_region_params *params,
+                         struct dmub_srv_region_info *out);
+
+/**
+ * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate fb locations
+ * @out: the output fb info from dmub
+ *
+ * Calculates the base address and size of each required dmub framebuffer
+ * using the region info and framebuffer parameters given.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+                                      const struct dmub_srv_fb_params *params,
+                                      struct dmub_srv_fb_info *out);
+
+/**
+ * dmub_srv_has_hw_support() - returns hw support state for dmcub
+ * @dmub: the dmub service
+ * @is_supported: hw support state
+ *
+ * Queries the hardware for DMCUB support and returns the result.
+ *
+ * Can be called before dmub_srv_hw_init().
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+                                        bool *is_supported);
+
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
+/**
+ * dmub_srv_hw_init() - initializes the underlying DMUB hardware
+ * @dmub: the dmub service
+ * @params: params for hardware initialization
+ *
+ * Resets the DMUB hardware and performs backdoor loading of the
+ * required cache regions based on the input framebuffer regions.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_NO_CTX - dmcub context not initialized
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+                                 const struct dmub_srv_hw_params *params);
+
+/**
+ * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
+ * @dmub: the dmub service
+ *
+ * Before destroying the DMUB service or releasing the backing framebuffer
+ * memory we'll need to put the DMCUB into reset first.
+ *
+ * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+ * @cmd: the command to queue
+ *
+ * Queues a command to the DMUB service but does not begin execution
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_QUEUE_FULL - no remaining room in queue
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+                                   const union dmub_rb_cmd *cmd);
+
+/**
+ * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * @dmub: the dmub service
+ *
+ * Begins execution of queued commands on the dmub.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until firmware has been autoloaded by the DMCUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without firmware autoload support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for firmware auto load timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+                                            uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the PHY has been initialized by the DMUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without PHY init support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+                                           uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the DMUB buffer is empty and all commands have
+ * finished processing. The maximum wait time is given in
+ * microseconds to prevent spinning forever.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+                                       uint32_t timeout_us);
+
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+                           enum dmub_gpint_command command_code,
+                           uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+                                            uint32_t *response);
+
+/**
+ * dmub_flush_buffer_mem() - Read back entire frame buffer region.
+ * This ensures that the write from x86 has been flushed and will not
+ * hang the DMCUB.
+ * @fb: frame buffer to flush
+ *
+ * Can be called after software initialization.
+ */
+void dmub_flush_buffer_mem(const struct dmub_fb *fb);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_SRV_H_ */
index 10b5fa9d25884bdc0e00b7b7a42660048966f3f2..599bf2055bcb531e7a0d8f84c4bb4941731940aa 100644 (file)
@@ -228,6 +228,7 @@ struct dmub_cmd_psr_copy_settings_data {
        uint8_t smu_optimizations_en;
        uint8_t frame_delay;
        uint8_t frame_cap_ind;
+       struct dmub_psr_debug_flags debug;
 };
 
 struct dmub_rb_cmd_psr_copy_settings {
@@ -260,6 +261,8 @@ struct dmub_rb_cmd_psr_set_version {
 struct dmub_cmd_abm_set_pipe_data {
        uint32_t ramping_boundary;
        uint32_t otg_inst;
+       uint32_t panel_inst;
+       uint32_t set_pipe_option;
 };
 
 struct dmub_rb_cmd_abm_set_pipe {
@@ -303,6 +306,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac {
        struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
 };
 
+struct dmub_cmd_abm_init_config_data {
+       union dmub_addr src;
+       uint16_t bytes;
+};
+
+struct dmub_rb_cmd_abm_init_config {
+       struct dmub_cmd_header header;
+       struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
 union dmub_rb_cmd {
        struct dmub_rb_cmd_read_modify_write read_modify_write;
        struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
@@ -324,6 +337,7 @@ union dmub_rb_cmd {
        struct dmub_rb_cmd_abm_set_level abm_set_level;
        struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
        struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
+       struct dmub_rb_cmd_abm_init_config abm_init_config;
 };
 
 #pragma pack(pop)
index d37535d219285c9e032f76c630089caaa2837148..e42de9ded275e31d2cd608f3af3afc5725c21986 100644 (file)
  */
 
 enum dmub_cmd_psr_type {
-       DMUB_CMD__PSR_SET_VERSION       = 0,
-       DMUB_CMD__PSR_COPY_SETTINGS     = 1,
-       DMUB_CMD__PSR_ENABLE            = 2,
-       DMUB_CMD__PSR_DISABLE           = 3,
-       DMUB_CMD__PSR_SET_LEVEL         = 4,
+       DMUB_CMD__PSR_SET_VERSION               = 0,
+       DMUB_CMD__PSR_COPY_SETTINGS             = 1,
+       DMUB_CMD__PSR_ENABLE                    = 2,
+       DMUB_CMD__PSR_DISABLE                   = 3,
+       DMUB_CMD__PSR_SET_LEVEL                 = 4,
 };
 
 enum psr_version {
-       PSR_VERSION_1                   = 0x10, // PSR Version 1
-       PSR_VERSION_2                   = 0x20, // PSR Version 2, includes selective update
-       PSR_VERSION_2_1                 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+       PSR_VERSION_1                           = 0,
+       PSR_VERSION_UNSUPPORTED                 = 0xFFFFFFFF,
 };
 
 enum dmub_cmd_abm_type {
index df875fdd2ab07a2d99342f167bd83292b63bf7d5..2ae48c18bb5b94fffabb00e9d26e2bd9240aaa90 100644 (file)
@@ -33,8 +33,6 @@
 extern "C" {
 #endif
 
-struct dmub_cmd_header;
-
 struct dmub_rb_init_params {
        void *ctx;
        void *base_address;
@@ -71,7 +69,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
 }
 
 static inline bool dmub_rb_push_front(struct dmub_rb *rb,
-                                     const struct dmub_cmd_header *cmd)
+                                     const union dmub_rb_cmd *cmd)
 {
        uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
        const uint64_t *src = (const uint64_t *)cmd;
@@ -93,7 +91,7 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
 }
 
 static inline bool dmub_rb_front(struct dmub_rb *rb,
-                                struct dmub_cmd_header *cmd)
+                                union dmub_rb_cmd  *cmd)
 {
        uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
 
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
deleted file mode 100644 (file)
index c2671f2..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef _DMUB_SRV_H_
-#define _DMUB_SRV_H_
-
-/**
- * DOC: DMUB interface and operation
- *
- * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
- * It delegates hardware initialization and command submission to the
- * microcontroller. DMUB is the shortname for DMCUB.
- *
- * This interface is not thread-safe. Ensure that all access to the interface
- * is properly synchronized by the caller.
- *
- * Initialization and usage of the DMUB service should be done in the
- * steps given below:
- *
- * 1. dmub_srv_create()
- * 2. dmub_srv_has_hw_support()
- * 3. dmub_srv_calc_region_info()
- * 4. dmub_srv_hw_init()
- *
- * The call to dmub_srv_create() is required to use the server.
- *
- * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
- * are helpers to query cache window size and allocate framebuffer(s)
- * for the cache windows.
- *
- * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
- *
- * If the queue is full the dmub_srv_wait_for_idle() call can be used to
- * wait until the queue has been cleared.
- *
- * Destroying the DMUB service can be done by calling dmub_srv_destroy().
- * This does not clear DMUB hardware state, only software state.
- *
- * The interface is intended to be standalone and should not depend on any
- * other component within DAL.
- */
-
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-#include "dmub_gpint_cmd.h"
-#include "dmub_rb.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* Forward declarations */
-struct dmub_srv;
-struct dmub_cmd_header;
-struct dmub_srv_common_regs;
-
-/* enum dmub_status - return code for dmcub functions */
-enum dmub_status {
-       DMUB_STATUS_OK = 0,
-       DMUB_STATUS_NO_CTX,
-       DMUB_STATUS_QUEUE_FULL,
-       DMUB_STATUS_TIMEOUT,
-       DMUB_STATUS_INVALID,
-};
-
-/* enum dmub_asic - dmub asic identifier */
-enum dmub_asic {
-       DMUB_ASIC_NONE = 0,
-       DMUB_ASIC_DCN20,
-       DMUB_ASIC_DCN21,
-       DMUB_ASIC_MAX,
-};
-
-/* enum dmub_window_id - dmub window identifier */
-enum dmub_window_id {
-       DMUB_WINDOW_0_INST_CONST = 0,
-       DMUB_WINDOW_1_STACK,
-       DMUB_WINDOW_2_BSS_DATA,
-       DMUB_WINDOW_3_VBIOS,
-       DMUB_WINDOW_4_MAILBOX,
-       DMUB_WINDOW_5_TRACEBUFF,
-       DMUB_WINDOW_6_FW_STATE,
-       DMUB_WINDOW_7_SCRATCH_MEM,
-       DMUB_WINDOW_TOTAL,
-};
-
-/**
- * struct dmub_region - dmub hw memory region
- * @base: base address for region, must be 256 byte aligned
- * @top: top address for region
- */
-struct dmub_region {
-       uint32_t base;
-       uint32_t top;
-};
-
-/**
- * struct dmub_window - dmub hw cache window
- * @off: offset to the fb memory in gpu address space
- * @r: region in uc address space for cache window
- */
-struct dmub_window {
-       union dmub_addr offset;
-       struct dmub_region region;
-};
-
-/**
- * struct dmub_fb - defines a dmub framebuffer memory region
- * @cpu_addr: cpu virtual address for the region, NULL if invalid
- * @gpu_addr: gpu virtual address for the region, NULL if invalid
- * @size: size of the region in bytes, zero if invalid
- */
-struct dmub_fb {
-       void *cpu_addr;
-       uint64_t gpu_addr;
-       uint32_t size;
-};
-
-/**
- * struct dmub_srv_region_params - params used for calculating dmub regions
- * @inst_const_size: size of the fw inst const section
- * @bss_data_size: size of the fw bss data section
- * @vbios_size: size of the vbios data
- * @fw_bss_data: raw firmware bss data section
- */
-struct dmub_srv_region_params {
-       uint32_t inst_const_size;
-       uint32_t bss_data_size;
-       uint32_t vbios_size;
-       const uint8_t *fw_bss_data;
-};
-
-/**
- * struct dmub_srv_region_info - output region info from the dmub service
- * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
- * @num_regions: number of regions used by the dmub service
- * @regions: region info
- *
- * The regions are aligned such that they can be all placed within the
- * same framebuffer but they can also be placed into different framebuffers.
- *
- * The size of each region can be calculated by the caller:
- * size = reg.top - reg.base
- *
- * Care must be taken when performing custom allocations to ensure that each
- * region base address is 256 byte aligned.
- */
-struct dmub_srv_region_info {
-       uint32_t fb_size;
-       uint8_t num_regions;
-       struct dmub_region regions[DMUB_WINDOW_TOTAL];
-};
-
-/**
- * struct dmub_srv_fb_params - parameters used for driver fb setup
- * @region_info: region info calculated by dmub service
- * @cpu_addr: base cpu address for the framebuffer
- * @gpu_addr: base gpu virtual address for the framebuffer
- */
-struct dmub_srv_fb_params {
-       const struct dmub_srv_region_info *region_info;
-       void *cpu_addr;
-       uint64_t gpu_addr;
-};
-
-/**
- * struct dmub_srv_fb_info - output fb info from the dmub service
- * @num_fbs: number of required dmub framebuffers
- * @fbs: fb data for each region
- *
- * Output from the dmub service helper that can be used by the
- * driver to prepare dmub_fb that can be passed into the dmub
- * hw init service.
- *
- * Assumes that all regions are within the same framebuffer
- * and have been setup according to the region_info generated
- * by the dmub service.
- */
-struct dmub_srv_fb_info {
-       uint8_t num_fb;
-       struct dmub_fb fb[DMUB_WINDOW_TOTAL];
-};
-
-/**
- * struct dmub_srv_base_funcs - Driver specific base callbacks
- */
-struct dmub_srv_base_funcs {
-       /**
-        * @reg_read:
-        *
-        * Hook for reading a register.
-        *
-        * Return: The 32-bit register value from the given address.
-        */
-       uint32_t (*reg_read)(void *ctx, uint32_t address);
-
-       /**
-        * @reg_write:
-        *
-        * Hook for writing a value to the register specified by address.
-        */
-       void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
-};
-
-/**
- * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
- */
-struct dmub_srv_hw_funcs {
-       /* private: internal use only */
-
-       void (*init)(struct dmub_srv *dmub);
-
-       void (*reset)(struct dmub_srv *dmub);
-
-       void (*reset_release)(struct dmub_srv *dmub);
-
-       void (*backdoor_load)(struct dmub_srv *dmub,
-                             const struct dmub_window *cw0,
-                             const struct dmub_window *cw1);
-
-       void (*setup_windows)(struct dmub_srv *dmub,
-                             const struct dmub_window *cw2,
-                             const struct dmub_window *cw3,
-                             const struct dmub_window *cw4,
-                             const struct dmub_window *cw5,
-                             const struct dmub_window *cw6);
-
-       void (*setup_mailbox)(struct dmub_srv *dmub,
-                             const struct dmub_region *inbox1);
-
-       uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
-
-       void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
-
-       bool (*is_supported)(struct dmub_srv *dmub);
-
-       bool (*is_hw_init)(struct dmub_srv *dmub);
-
-       bool (*is_phy_init)(struct dmub_srv *dmub);
-
-       bool (*is_auto_load_done)(struct dmub_srv *dmub);
-
-       void (*set_gpint)(struct dmub_srv *dmub,
-                         union dmub_gpint_data_register reg);
-
-       bool (*is_gpint_acked)(struct dmub_srv *dmub,
-                              union dmub_gpint_data_register reg);
-
-       uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
-};
-
-/**
- * struct dmub_srv_create_params - params for dmub service creation
- * @base_funcs: driver supplied base routines
- * @hw_funcs: optional overrides for hw funcs
- * @user_ctx: context data for callback funcs
- * @asic: driver supplied asic
- * @is_virtual: false for hw support only
- */
-struct dmub_srv_create_params {
-       struct dmub_srv_base_funcs funcs;
-       struct dmub_srv_hw_funcs *hw_funcs;
-       void *user_ctx;
-       enum dmub_asic asic;
-       bool is_virtual;
-};
-
-/*
- * struct dmub_srv_hw_params - params for dmub hardware initialization
- * @fb: framebuffer info for each region
- * @fb_base: base of the framebuffer aperture
- * @fb_offset: offset of the framebuffer aperture
- * @psp_version: psp version to pass for DMCU init
- * @load_inst_const: true if DMUB should load inst const fw
- */
-struct dmub_srv_hw_params {
-       struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
-       uint64_t fb_base;
-       uint64_t fb_offset;
-       uint32_t psp_version;
-       bool load_inst_const;
-};
-
-/**
- * struct dmub_srv - software state for dmcub
- * @asic: dmub asic identifier
- * @user_ctx: user provided context for the dmub_srv
- * @is_virtual: false if hardware support only
- * @fw_state: dmub firmware state pointer
- */
-struct dmub_srv {
-       enum dmub_asic asic;
-       void *user_ctx;
-       bool is_virtual;
-       struct dmub_fb scratch_mem_fb;
-       volatile const struct dmub_fw_state *fw_state;
-
-       /* private: internal use only */
-       const struct dmub_srv_common_regs *regs;
-
-       struct dmub_srv_base_funcs funcs;
-       struct dmub_srv_hw_funcs hw_funcs;
-       struct dmub_rb inbox1_rb;
-
-       bool sw_init;
-       bool hw_init;
-
-       uint64_t fb_base;
-       uint64_t fb_offset;
-       uint32_t psp_version;
-};
-
-/**
- * dmub_srv_create() - creates the DMUB service.
- * @dmub: the dmub service
- * @params: creation parameters for the service
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
-                                const struct dmub_srv_create_params *params);
-
-/**
- * dmub_srv_destroy() - destroys the DMUB service.
- * @dmub: the dmub service
- */
-void dmub_srv_destroy(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_calc_region_info() - retreives region info from the dmub service
- * @dmub: the dmub service
- * @params: parameters used to calculate region locations
- * @info_out: the output region info from dmub
- *
- * Calculates the base and top address for all relevant dmub regions
- * using the parameters given (if any).
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
-                         const struct dmub_srv_region_params *params,
-                         struct dmub_srv_region_info *out);
-
-/**
- * dmub_srv_calc_region_info() - retreives fb info from the dmub service
- * @dmub: the dmub service
- * @params: parameters used to calculate fb locations
- * @info_out: the output fb info from dmub
- *
- * Calculates the base and top address for all relevant dmub regions
- * using the parameters given (if any).
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-                                      const struct dmub_srv_fb_params *params,
-                                      struct dmub_srv_fb_info *out);
-
-/**
- * dmub_srv_has_hw_support() - returns hw support state for dmcub
- * @dmub: the dmub service
- * @is_supported: hw support state
- *
- * Queries the hardware for DMCUB support and returns the result.
- *
- * Can be called before dmub_srv_hw_init().
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
-                                        bool *is_supported);
-
-/**
- * dmub_srv_is_hw_init() - returns hardware init state
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
-
-/**
- * dmub_srv_hw_init() - initializes the underlying DMUB hardware
- * @dmub: the dmub service
- * @params: params for hardware initialization
- *
- * Resets the DMUB hardware and performs backdoor loading of the
- * required cache regions based on the input framebuffer regions.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_NO_CTX - dmcub context not initialized
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
-                                 const struct dmub_srv_hw_params *params);
-
-/**
- * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
- * @dmub: the dmub service
- *
- * Before destroying the DMUB service or releasing the backing framebuffer
- * memory we'll need to put the DMCUB into reset first.
- *
- * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
- * @dmub: the dmub service
- * @cmd: the command to queue
- *
- * Queues a command to the DMUB service but does not begin execution
- * immediately.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_QUEUE_FULL - no remaining room in queue
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
-                                   const struct dmub_cmd_header *cmd);
-
-/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
- * @dmub: the dmub service
- *
- * Begins execution of queued commands on the dmub.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
- * @dmub: the dmub service
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Waits until firmware has been autoloaded by the DMCUB. The maximum
- * wait time is given in microseconds to prevent spinning forever.
- *
- * On ASICs without firmware autoload support this function will return
- * immediately.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
-                                            uint32_t timeout_us);
-
-/**
- * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
- * @dmub: the dmub service
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Waits until the PHY has been initialized by the DMUB. The maximum
- * wait time is given in microseconds to prevent spinning forever.
- *
- * On ASICs without PHY init support this function will return
- * immediately.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
-                                           uint32_t timeout_us);
-
-/**
- * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
- * @dmub: the dmub service
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Waits until the DMUB buffer is empty and all commands have
- * finished processing. The maximum wait time is given in
- * microseconds to prevent spinning forever.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
-                                       uint32_t timeout_us);
-
-/**
- * dmub_srv_send_gpint_command() - Sends a GPINT based command.
- * @dmub: the dmub service
- * @command_code: the command code to send
- * @param: the command parameter to send
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Sends a command via the general purpose interrupt (GPINT).
- * Waits for the number of microseconds specified by timeout_us
- * for the command ACK before returning.
- *
- * Can be called after software initialization.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_TIMEOUT - wait for ACK timed out
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status
-dmub_srv_send_gpint_command(struct dmub_srv *dmub,
-                           enum dmub_gpint_command command_code,
-                           uint16_t param, uint32_t timeout_us);
-
-/**
- * dmub_srv_get_gpint_response() - Queries the GPINT response.
- * @dmub: the dmub service
- * @response: the response for the last GPINT
- *
- * Returns the response code for the last GPINT interrupt.
- *
- * Can be called after software initialization.
- *
- * Return:
- *   DMUB_STATUS_OK - success
- *   DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
-                                            uint32_t *response);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _DMUB_SRV_H_ */
index 41d524b0db2f3dbac64ab45d4f17c7e17486d62b..bed5b023a3967ac95a6eb575b5c2a308eb38e3ee 100644 (file)
@@ -49,6 +49,12 @@ extern "C" {
 #define dmub_udelay(microseconds) udelay(microseconds)
 #endif
 
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
 union dmub_addr {
        struct {
                uint32_t low_part;
@@ -57,6 +63,11 @@ union dmub_addr {
        uint64_t quad_part;
 };
 
+struct dmub_psr_debug_flags {
+       uint8_t visual_confirm : 1;
+       uint8_t reserved : 7;
+};
+
 #if defined(__cplusplus)
 }
 #endif
index 63bb9e2c81de2c91c8b9e37037f9499329c33137..edc73d6d7ba2848cb8a779a292a86dea8af9df23 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_reg.h"
 #include "dmub_dcn20.h"
 
@@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
 
        dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
 
-       dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
-       REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
-       REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
-       REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
-       REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
-                 DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
-                 DMCUB_REGION3_CW2_ENABLE, 1);
+       if (cw2->region.base != cw2->region.top) {
+               dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset,
+                                         &offset);
+
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+               REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+               REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+                         DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+                         DMCUB_REGION3_CW2_ENABLE, 1);
+       } else {
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+       }
 
        dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
 
index 5bed9fcd6b5cc06f76f8fb2827de3a360b8aab3e..e8f488232e347582bdb33604403df6e3f878988b 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_reg.h"
 #include "dmub_dcn21.h"
 
index 4094eca212f0b11b8aa8e8a5a765c6d4bf6a94c1..ca0c8a54b635e49214a9326dd60af4776f6bdac1 100644 (file)
@@ -24,7 +24,7 @@
  */
 
 #include "dmub_reg.h"
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 
 struct dmub_reg_value_masks {
        uint32_t value;
index ce32cc7933c40777f185b08063b4b47cd5a892b1..0e3751d94cb09a937f91de74aadf1a644a45aff6 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_dcn20.h"
 #include "dmub_dcn21.h"
 #include "dmub_fw_meta.h"
@@ -70,7 +70,7 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
        return (val + factor - 1) / factor * factor;
 }
 
-static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+void dmub_flush_buffer_mem(const struct dmub_fb *fb)
 {
        const uint8_t *base = (const uint8_t *)fb->cpu_addr;
        uint8_t buf[64];
@@ -91,18 +91,32 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
 }
 
 static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
 {
        const union dmub_fw_meta *meta;
+       const uint8_t *blob = NULL;
+       uint32_t blob_size = 0;
+       uint32_t meta_offset = 0;
+
+       if (params->fw_bss_data) {
+               /* Legacy metadata region. */
+               blob = params->fw_bss_data;
+               blob_size = params->bss_data_size;
+               meta_offset = DMUB_FW_META_OFFSET;
+       } else if (params->fw_inst_const) {
+               /* Combined metadata region. */
+               blob = params->fw_inst_const;
+               blob_size = params->inst_const_size;
+               meta_offset = 0;
+       }
 
-       if (fw_bss_data == NULL)
+       if (!blob || !blob_size)
                return NULL;
 
-       if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+       if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
                return NULL;
 
-       meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
-                                           DMUB_FW_META_OFFSET -
+       meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
                                            sizeof(union dmub_fw_meta));
 
        if (meta->info.magic_value != DMUB_FW_META_MAGIC)
@@ -247,8 +261,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
        mail->base = dmub_align(bios->top, 256);
        mail->top = mail->base + DMUB_MAILBOX_SIZE;
 
-       fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
-                                       params->bss_data_size);
+       fw_info = dmub_get_fw_meta_info(params);
 
        if (fw_info) {
                fw_state_size = fw_info->fw_region_size;
@@ -449,7 +462,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
 }
 
 enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
-                                   const struct dmub_cmd_header *cmd)
+                                   const union dmub_rb_cmd *cmd)
 {
        if (!dmub->hw_init)
                return DMUB_STATUS_INVALID;
index f31e6befc8d68e260eb55acfa4d6e206f97f043e..42229b4effdce756fd5454fcb0c1d12797d75f09 100644 (file)
@@ -83,6 +83,12 @@ enum hdcp_link {
        HDCP_LINK_SECONDARY
 };
 
+enum hdcp_message_status {
+       HDCP_MESSAGE_SUCCESS,
+       HDCP_MESSAGE_FAILURE,
+       HDCP_MESSAGE_UNSUPPORTED
+};
+
 struct hdcp_protection_message {
        enum hdcp_version version;
        /* relevant only for DVI */
@@ -91,6 +97,7 @@ struct hdcp_protection_message {
        uint32_t length;
        uint8_t max_retries;
        uint8_t *data;
+       enum hdcp_message_status status;
 };
 
 #endif
index 6e008de25629be9cf18432b7728969c0ce5ce963..02c23b04d34be3e5c1383ecf7a9454fd672d4423 100644 (file)
@@ -40,8 +40,6 @@ struct dc_state;
  *
  */
 
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
-
 void pre_surface_trace(
                struct dc *dc,
                const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
 #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
                do { \
                        (void)(link); \
-                       dc_conn_log_hex_linux(hex_data, hex_len); \
                        DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
                } while (0)
 
 #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
                do { \
                        (void)(link); \
-                       dc_conn_log_hex_linux(hex_data, hex_len); \
                        DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
                } while (0)
 
index cac09d500fda938daa5899f44f0bdc87a2725ea5..9431b48aecb48687178d5c66cd8e0bbd8327ddc7 100644 (file)
@@ -1782,7 +1782,8 @@ rgb_user_alloc_fail:
        return ret;
 }
 
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+               struct dc_transfer_func *input_tf,
                const struct dc_gamma *ramp, bool mapUserRamp)
 {
        struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
@@ -1801,11 +1802,29 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
        /* we can use hardcoded curve for plain SRGB TF
         * If linear, it's bypass if on user ramp
         */
-       if (input_tf->type == TF_TYPE_PREDEFINED &&
-                       (input_tf->tf == TRANSFER_FUNCTION_SRGB ||
-                                       input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
-                                       !mapUserRamp)
-               return true;
+       if (input_tf->type == TF_TYPE_PREDEFINED) {
+               if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
+                               input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
+                               !mapUserRamp)
+                       return true;
+
+               if (dc_caps != NULL &&
+                       dc_caps->dpp.dcn_arch == 1) {
+
+                       if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
+                                       dc_caps->dpp.dgam_rom_caps.pq == 1)
+                               return true;
+
+                       if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+                                       dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
+                               return true;
+
+                       // HLG OOTF not accounted for
+                       if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
+                                       dc_caps->dpp.dgam_rom_caps.hlg == 1)
+                               return true;
+               }
+       }
 
        input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
@@ -1902,7 +1921,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 
 
-       if (ramp->type == GAMMA_CUSTOM)
+       if (ramp && ramp->type == GAMMA_CUSTOM)
                apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
 
        ret = true;
index 9994817a9a03267d79f6efb714c4b883819e88fb..7f56226ba77a91904ae3b64224098a868eb4a0a3 100644 (file)
@@ -30,6 +30,7 @@ struct dc_transfer_func;
 struct dc_gamma;
 struct dc_transfer_func_distributed_points;
 struct dc_rgb_fixed;
+struct dc_color_caps;
 enum dc_transfer_func_predefined;
 
 /* For SetRegamma ADL interface support
@@ -100,7 +101,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
                const struct freesync_hdr_tf_params *fs_params);
 
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+               struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp);
 
 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
index c33454a9e0b4dc88a3e6bbc36a2946db0febcefd..eb7421e83b8658c0352fbb08d91ee25b0e90f8d7 100644 (file)
@@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
                return true;
        } else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
                        in_vrr->fixed.target_refresh_in_uhz !=
-                                       in_config->min_refresh_in_uhz) {
+                                       in_config->fixed_refresh_in_uhz) {
                return true;
        } else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
                return true;
@@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
        return false;
 }
 
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
                struct dc_info_packet *infopacket)
 {
        /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
@@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
                        vrr->state == VRR_STATE_ACTIVE_FIXED)
                infopacket->sb[6] |= 0x04;
 
+       // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
        /* PB7 = FreeSync Minimum refresh rate (Hz) */
-       infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+                       vrr->state == VRR_STATE_ACTIVE_FIXED) {
+               infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+       } else {
+               infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       }
 
        /* PB8 = FreeSync Maximum refresh rate (Hz)
         * Note: We should never go above the field rate of the mode timing set.
         */
        infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
 
+       //FreeSync HDR
+       infopacket->sb[9] = 0;
+       infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
+               struct dc_info_packet *infopacket)
+{
+       /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+       infopacket->sb[1] = 0x1A;
+
+       /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+       infopacket->sb[2] = 0x00;
+
+       /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+       infopacket->sb[3] = 0x00;
+
+       /* PB4 = Reserved */
+
+       /* PB5 = Reserved */
+
+       /* PB6 = [Bits 7:3 = Reserved] */
+
+       /* PB6 = [Bit 0 = FreeSync Supported] */
+       if (vrr->state != VRR_STATE_UNSUPPORTED)
+               infopacket->sb[6] |= 0x01;
+
+       /* PB6 = [Bit 1 = FreeSync Enabled] */
+       if (vrr->state != VRR_STATE_DISABLED &&
+                       vrr->state != VRR_STATE_UNSUPPORTED)
+               infopacket->sb[6] |= 0x02;
+
+       /* PB6 = [Bit 2 = FreeSync Active] */
+       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+                       vrr->state == VRR_STATE_ACTIVE_FIXED)
+               infopacket->sb[6] |= 0x04;
+
+       if (vrr->state == VRR_STATE_ACTIVE_FIXED) {
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+       } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       } else {
+               // Non-fs case, program nominal range
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       }
 
        //FreeSync HDR
        infopacket->sb[9] = 0;
@@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket);
        build_vrr_infopacket_checksum(&payload_size, infopacket);
 
        infopacket->valid = true;
@@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket);
+
+       build_vrr_infopacket_fs2_data(app_tf, infopacket);
+
+       build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+       infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v3(enum signal_type signal,
+               const struct mod_vrr_params *vrr,
+               enum color_transfer_func app_tf,
+               struct dc_info_packet *infopacket)
+{
+       unsigned int payload_size = 0;
+
+       build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+       build_vrr_infopacket_data_v3(vrr, infopacket);
 
        build_vrr_infopacket_fs2_data(app_tf, infopacket);
 
@@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
                return;
 
        switch (packet_type) {
-       case PACKET_TYPE_FS2:
+       case PACKET_TYPE_FS_V3:
+               build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+               break;
+       case PACKET_TYPE_FS_V2:
                build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
                break;
        case PACKET_TYPE_VRR:
-       case PACKET_TYPE_FS1:
+       case PACKET_TYPE_FS_V1:
        default:
                build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
        }
@@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                                calc_duration_in_us_from_refresh_in_uhz(
                                                (unsigned int)max_refresh_in_uhz);
 
+               if (in_config->state == VRR_STATE_ACTIVE_FIXED)
+                       in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
+               else
+                       in_out_vrr->fixed_refresh_in_uhz = 0;
+
                refresh_range = in_out_vrr->max_refresh_in_uhz -
                                in_out_vrr->min_refresh_in_uhz;
 
@@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                                in_out_vrr->min_refresh_in_uhz);
        } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
                in_out_vrr->fixed.target_refresh_in_uhz =
-                               in_out_vrr->min_refresh_in_uhz;
+                               in_out_vrr->fixed_refresh_in_uhz;
                if (in_out_vrr->fixed.ramping_active &&
                                in_out_vrr->fixed.fixed_active) {
                        /* Do not update vtotals if ramping is already active
index cc1d3f470b99f0a8836c44252bbca3eecc9ffd72..e9fbd94f8635e7654c7a701f604c8ceecdfd5c15 100644 (file)
@@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
        /* add display to connection */
        hdcp->connection.link = *link;
        *display_container = *display;
-       status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+       status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
        if (status != MOD_HDCP_STATUS_SUCCESS)
                goto out;
 
@@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
        status = mod_hdcp_remove_display_from_topology(hdcp, index);
        if (status != MOD_HDCP_STATUS_SUCCESS)
                goto out;
-       display->state = MOD_HDCP_DISPLAY_INACTIVE;
+       memset(display, 0, sizeof(struct mod_hdcp_display));
 
        /* request authentication when connection is not reset */
        if (current_state(hdcp) != HDCP_UNINITIALIZED)
index 5cb4546be0ef04c7e8a66f1ed33f7c2e4f9641e8..b0cefed2eb02660b7f43d5ad7b83a9d2edbbb453 100644 (file)
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
 
 /* psp functions */
 enum mod_hdcp_status mod_hdcp_add_display_to_topology(
-               struct mod_hdcp *hdcp, uint8_t index);
+               struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
 enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
                struct mod_hdcp *hdcp, uint8_t index);
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
                struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
                struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status);
 
 /* ddc functions */
 enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
@@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
        return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
 }
 
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
-       return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
 static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
 {
        return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
 
 static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
 {
-       uint8_t added_count = 0;
+       uint8_t active_count = 0;
        uint8_t i;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
                if (is_display_active(&hdcp->displays[i]))
-                       added_count++;
-       return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
-       uint8_t added_count = 0;
-       uint8_t i;
-
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_added(&hdcp->displays[i]))
-                       added_count++;
-       return added_count;
+                       active_count++;
+       return active_count;
 }
 
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
                struct mod_hdcp *hdcp)
 {
        uint8_t i;
        struct mod_hdcp_display *display = NULL;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_added(&hdcp->displays[i])) {
+               if (is_display_active(&hdcp->displays[i])) {
                        display = &hdcp->displays[i];
                        break;
                }
index 37c8c05497d66487c697712a58bf708deb001e47..f244b72e74e06969a47abab0096948795fa3bc38 100644 (file)
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* device count must be greater than or equal to tracked hdcp displays */
-       return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+       return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
                        MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
                        MOD_HDCP_STATUS_SUCCESS;
 }
index 491c00f48026e285d6c9a89c68fb17756ed51657..549c113abcf7fa438de7fb19d70b8e06e07231f6 100644 (file)
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* device count must be greater than or equal to tracked hdcp displays */
-       return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+       return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
                        MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
                        MOD_HDCP_STATUS_SUCCESS;
 }
index 44956f9ba17844bf19250246bee0fb3686394f85..fb6a19d020f9556d6102d0f331b92c99e4d8f763 100644 (file)
@@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status)
                return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
                return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
-       case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
-               return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+       case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
+               return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
                return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
@@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status)
                return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
        case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
                return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
-       case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
-               return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+       case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
+               return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
                return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
        case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
index c2929815c3ee977d685df8ed9e287b1175484cf4..fb1161dd7ea809d2c2e69201589a485233276188 100644 (file)
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
        struct ta_dtm_shared_memory *dtm_cmd;
        struct mod_hdcp_display *display =
                        get_active_display_at_index(hdcp, index);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
-       if (!display || !is_display_added(display))
+       if (!display || !is_display_active(display))
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->dtm_context.mutex);
+
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
        dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
 
        psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       } else {
+               display->state = MOD_HDCP_DISPLAY_ACTIVE;
+               HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+       }
 
-       display->state = MOD_HDCP_DISPLAY_ACTIVE;
-       HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->dtm_context.mutex);
+       return status;
 }
 enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
-                                                     uint8_t index)
+                                              struct mod_hdcp_display *display)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_dtm_shared_memory *dtm_cmd;
-       struct mod_hdcp_display *display =
-                       get_active_display_at_index(hdcp, index);
        struct mod_hdcp_link *link = &hdcp->connection.link;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->dtm_context.dtm_initialized) {
                DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+               display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       if (!display || is_display_added(display))
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
        dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
+       mutex_lock(&psp->dtm_context.mutex);
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
        dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
 
        psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
-       display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-       HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+               display->state = MOD_HDCP_DISPLAY_INACTIVE;
+               status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       } else {
+               HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->dtm_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 {
 
        struct psp_context *psp = hdcp->config.psp.handle;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->hdcp_context.hdcp_initialized) {
                DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
@@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
        }
 
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+       mutex_lock(&psp->hdcp_context.mutex);
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
@@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 
        hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
-
-       hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
-       memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
-               sizeof(hdcp->auth.msg.hdcp1.aksv));
-       memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
-               sizeof(hdcp->auth.msg.hdcp1.an));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+       } else {
+               hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+               memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+                      sizeof(hdcp->auth.msg.hdcp1.aksv));
+               memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+                      sizeof(hdcp->auth.msg.hdcp1.an));
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
@@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        uint8_t i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
-
-       HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_encryption_enabled(
-                               &hdcp->displays[i])) {
-                       hdcp->displays[i].state =
-                                       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-                       HDCP_HDCP1_DISABLED_TRACE(hdcp,
-                                       hdcp->displays[i].index);
-               }
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+       } else {
+               HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+               for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+                       if (is_display_encryption_enabled(&hdcp->displays[i])) {
+                               hdcp->displays[i].state =
+                                                       MOD_HDCP_DISPLAY_ACTIVE;
+                               HDCP_HDCP1_DISABLED_TRACE(
+                                       hdcp, hdcp->displays[i].index);
+                       }
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
-       if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+       } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
            TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
                /* needs second part of authentication */
                hdcp->connection.is_repeater = 1;
@@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
        } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
                   TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
                hdcp->connection.is_hdcp1_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
+               status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
        } else
-               return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
+               status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
-
-       if (!is_dp_mst_hdcp(hdcp)) {
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
+       } else if (!is_dp_mst_hdcp(hdcp)) {
                display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
        }
-       return MOD_HDCP_STATUS_SUCCESS;
+
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
@@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
                status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
        }
 
+       mutex_unlock(&psp->hdcp_context.mutex);
        return status;
 }
 
@@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        int i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 
-               if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-                   hdcp->displays[i].adjust.disable)
-                       continue;
+               if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+                               continue;
 
                memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
 
                psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-               if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-                       return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+               if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+                       status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+                       break;
+               }
 
                hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
        }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+                       hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
+               status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
 
-       return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
-                      ? MOD_HDCP_STATUS_SUCCESS
-                      : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
@@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
 
        if (!psp->hdcp_context.hdcp_initialized) {
                DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
        if (!display)
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->hdcp_context.mutex);
+
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
        hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
        if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
 
-       hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+       else
+               hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
@@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        uint8_t i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
-
-       HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_encryption_enabled(
-                               &hdcp->displays[i])) {
-                       hdcp->displays[i].state =
-                                       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-                       HDCP_HDCP2_DISABLED_TRACE(hdcp,
-                                       hdcp->displays[i].index);
-               }
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+       } else {
+               HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+               for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+                       if (is_display_encryption_enabled(&hdcp->displays[i])) {
+                               hdcp->displays[i].state =
+                                                       MOD_HDCP_DISPLAY_ACTIVE;
+                               HDCP_HDCP2_DISABLED_TRACE(
+                                       hdcp, hdcp->displays[i].index);
+                       }
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
@@ -437,7 +470,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
-
-       memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ake_init));
+               status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+       else
+               memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_init));
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
@@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
-
-       memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
-              &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
-              sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
-
-       if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
-               hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
-               hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
-               return MOD_HDCP_STATUS_SUCCESS;
-       } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
-               hdcp->connection.is_hdcp2_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+               memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+                      &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+               if (msg_out->process.msg1_status ==
+                   TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+                       hdcp->connection.is_km_stored =
+                               msg_out->process.is_km_stored ? 1 : 0;
+                       hdcp->connection.is_repeater =
+                               msg_out->process.is_repeater ? 1 : 0;
+                       status = MOD_HDCP_STATUS_SUCCESS;
+               } else if (msg_out->process.msg1_status ==
+                          TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+                       hdcp->connection.is_hdcp2_revoked = 1;
+                       status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+               }
        }
-
-       return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
-
-       if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+       else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
        else if (!hdcp->connection.is_km_stored &&
-                msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
-
+                  msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
@@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.lc_init));
+               status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+       else
+               memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.lc_init));
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
@@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
-
-       if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+                       msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
@@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ske_eks));
-       msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
-
-       if (is_dp_hdcp(hdcp)) {
-               memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
-                      &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
-                      sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.ske_eks,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+               msg_out->prepare.msg1_desc.msg_size =
+                       sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+               if (is_dp_hdcp(hdcp)) {
+                       memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+                              &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+                              sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+               }
        }
+       mutex_unlock(&psp->hdcp_context.mutex);
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
-
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!display)
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->hdcp_context.mutex);
+
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
        hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
 
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
-
-       if (!is_dp_mst_hdcp(hdcp)) {
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+       } else if (!is_dp_mst_hdcp(hdcp)) {
                display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
        }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
@@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+       mutex_lock(&psp->hdcp_context.mutex);
 
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
-
-       if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
-               hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
-               hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
-               return MOD_HDCP_STATUS_SUCCESS;
-       } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
-               hdcp->connection.is_hdcp2_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+               if (msg_out->process.msg1_status ==
+                   TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+                       hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+                       hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+                       status = MOD_HDCP_STATUS_SUCCESS;
+               } else if (msg_out->process.msg1_status ==
+                          TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+                       hdcp->connection.is_hdcp2_revoked = 1;
+                       status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+               }
        }
-
-
-       return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        uint8_t i;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
 
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-               if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-                   hdcp->displays[i].adjust.disable)
-                       continue;
+               if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+                               continue;
+
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
 
@@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
                HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
        }
 
-       return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
-                                                                 : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+       if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_SUCCESS;
+       else
+               status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
@@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
-
-       hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
-
-       memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+       } else {
+               hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+               memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+       }
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
@@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
-                              (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-                      ? MOD_HDCP_STATUS_SUCCESS
-                      : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
-}
-
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status)
-{
-       struct psp_context *psp = hdcp->config.psp.handle;
-       struct ta_hdcp_shared_memory *hdcp_cmd;
-
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
-       hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
-       hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
-       hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
-       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
-       psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_FAILURE;
-
-       if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
-               if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
-                       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
-               else
-                       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
-       }
+       if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+           msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_SUCCESS;
+       else
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
+
index dbe7835aabcf747c25825e76dac9d2e047bb9025..0ba3cf7f336a82074fc9a6c4607fea4a9aed86b9 100644 (file)
@@ -83,6 +83,8 @@ struct mod_freesync_config {
        bool btr;
        unsigned int min_refresh_in_uhz;
        unsigned int max_refresh_in_uhz;
+       unsigned int fixed_refresh_in_uhz;
+
 };
 
 struct mod_vrr_params_btr {
@@ -112,6 +114,7 @@ struct mod_vrr_params {
        uint32_t max_duration_in_us;
        uint32_t max_refresh_in_uhz;
        uint32_t min_duration_in_us;
+       uint32_t fixed_refresh_in_uhz;
 
        struct dc_crtc_timing_adjust adjust;
 
index c088602bc1a03cfe413980a84b6386d10b83ec6b..eed560eecbab498dce293105ca1add9add9a5dc9 100644 (file)
@@ -60,7 +60,7 @@ enum mod_hdcp_status {
        MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
        MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
        MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
-       MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+       MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
        MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
@@ -90,7 +90,7 @@ enum mod_hdcp_status {
        MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
        MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
        MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
-       MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+       MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
        MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
        MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
@@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
 enum mod_hdcp_display_state {
        MOD_HDCP_DISPLAY_INACTIVE = 0,
        MOD_HDCP_DISPLAY_ACTIVE,
-       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
        MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
 };
 
index fe21179043298c206ac43d85bd5c7c79f039aac8..198c0e64d13a824fdfacb73d40487c89775dea37 100644 (file)
@@ -40,8 +40,9 @@ enum color_transfer_func {
 
 enum vrr_packet_type {
        PACKET_TYPE_VRR,
-       PACKET_TYPE_FS1,
-       PACKET_TYPE_FS2,
+       PACKET_TYPE_FS_V1,
+       PACKET_TYPE_FS_V2,
+       PACKET_TYPE_FS_V3,
        PACKET_TYPE_VTEM
 };
 
index cff3ab15fc0cc5ced207161b57090af2a213bc67..7cd8a43d188962c45d92b05042f59bb4a4d006ec 100644 (file)
@@ -144,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
        }
 
        /*VSC packet set to 2 when DP revision >= 1.2*/
-       if (stream->psr_version != 0)
+       if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
                vsc_packet_revision = vsc_packet_rev2;
 
        /* Update to revision 5 for extended colorimetry support */
index e75a4bb94488ec6fa71febd25d23e5e56853da8c..8c37bcc27132c7576cbb10459ab5a18a1306dbb8 100644 (file)
@@ -24,6 +24,9 @@
 
 #include "power_helpers.h"
 #include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc.h"
+#include "core_types.h"
 
 #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
 
@@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
 }
 
 static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
-               struct iram_table_v_2_2 *table)
+               struct iram_table_v_2_2 *table, bool big_endian)
 {
        unsigned int i;
        unsigned int num_entries = NUM_BL_CURVE_SEGS;
@@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
                lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
                ASSERT(lut_index < params.backlight_lut_array_size);
 
-               table->backlight_thresholds[i] =
-                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
-               table->backlight_offsets[i] =
-                       cpu_to_be16(params.backlight_lut_array[lut_index]);
+               table->backlight_thresholds[i] = (big_endian) ?
+                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
+                       cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
+               table->backlight_offsets[i] = (big_endian) ?
+                       cpu_to_be16(params.backlight_lut_array[lut_index]) :
+                       cpu_to_le16(params.backlight_lut_array[lut_index]);
        }
 }
 
@@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
        ram_table->crgb_slope[7]  = cpu_to_be16(0x1910);
 
        fill_backlight_transform_table_v_2_2(
-                       params, ram_table);
+                       params, ram_table, true);
 }
 
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
 {
        unsigned int i, j;
        unsigned int set = params.set;
 
        ram_table->flags = 0x0;
-
-       ram_table->min_abm_backlight =
-                       cpu_to_be16(params.min_abm_backlight);
+       ram_table->min_abm_backlight = (big_endian) ?
+               cpu_to_be16(params.min_abm_backlight) :
+               cpu_to_le16(params.min_abm_backlight);
 
        for (i = 0; i < NUM_AGGR_LEVEL; i++) {
                ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
@@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
        ram_table->iir_curve[4] = 0x65;
 
        //Gamma 2.2
-       ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
-       ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
-       ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
-       ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
-       ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
-       ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
-       ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
-       ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
-       ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
-       ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
-       ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
-       ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
-       ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
-       ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
-       ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
-       ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
-       ram_table->crgb_slope[0]  = cpu_to_be16(0x3609);
-       ram_table->crgb_slope[1]  = cpu_to_be16(0x2dfa);
-       ram_table->crgb_slope[2]  = cpu_to_be16(0x27ea);
-       ram_table->crgb_slope[3]  = cpu_to_be16(0x235d);
-       ram_table->crgb_slope[4]  = cpu_to_be16(0x2042);
-       ram_table->crgb_slope[5]  = cpu_to_be16(0x1dc3);
-       ram_table->crgb_slope[6]  = cpu_to_be16(0x1b1a);
-       ram_table->crgb_slope[7]  = cpu_to_be16(0x1910);
+       ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
+       ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
+       ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
+       ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
+       ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
+       ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
+       ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
+       ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
+       ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
+       ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
+       ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
+       ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
+       ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
+       ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
+       ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
+       ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
+       ram_table->crgb_slope[0]  = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
+       ram_table->crgb_slope[1]  = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
+       ram_table->crgb_slope[2]  = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
+       ram_table->crgb_slope[3]  = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
+       ram_table->crgb_slope[4]  = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
+       ram_table->crgb_slope[5]  = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
+       ram_table->crgb_slope[6]  = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
+       ram_table->crgb_slope[7]  = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
 
        fill_backlight_transform_table_v_2_2(
-                       params, ram_table);
+                       params, ram_table, big_endian);
+}
+
+bool dmub_init_abm_config(struct abm *abm,
+       struct dmcu_iram_parameters params)
+{
+       unsigned char ram_table[IRAM_SIZE];
+       bool result = false;
+
+       if (abm == NULL)
+               return false;
+
+       memset(&ram_table, 0, sizeof(ram_table));
+
+       fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
+       result = abm->funcs->init_abm_config(
+               abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+
+       return result;
 }
 
 bool dmcu_load_iram(struct dmcu *dmcu,
@@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu,
        if (dmcu == NULL)
                return false;
 
-       if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+       if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
                return true;
 
        memset(&ram_table, 0, sizeof(ram_table));
 
        if (dmcu->dmcu_version.abm_version == 0x24) {
-               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
-               result = dmcu->funcs->load_iram(
-                               dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
+                       result = dmcu->funcs->load_iram(
+                                       dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
        } else if (dmcu->dmcu_version.abm_version == 0x23) {
-               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
 
                result = dmcu->funcs->load_iram(
                                dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
index e541570263308af5c504748db2f401e7106c6919..46fbca2e2cd1c850a68e62c1e87fdaa844b2cf40 100644 (file)
@@ -26,6 +26,7 @@
 #define MODULES_POWER_POWER_HELPERS_H_
 
 #include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
 
 
 enum abm_defines {
@@ -44,5 +45,7 @@ struct dmcu_iram_parameters {
 
 bool dmcu_load_iram(struct dmcu *dmcu,
                struct dmcu_iram_parameters params);
+bool dmub_init_abm_config(struct abm *abm,
+               struct dmcu_iram_parameters params);
 
 #endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
deleted file mode 100644 (file)
index 03121ca..0000000
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "mod_stats.h"
-#include "dm_services.h"
-#include "dc.h"
-#include "core_types.h"
-
-#define DAL_STATS_ENABLE_REGKEY                        "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000000
-#define DAL_STATS_ENABLE_REGKEY_ENABLED                0x00000001
-
-#define DAL_STATS_ENTRIES_REGKEY               "DalStatsEntries"
-#define DAL_STATS_ENTRIES_REGKEY_DEFAULT       0x00350000
-#define DAL_STATS_ENTRIES_REGKEY_MAX           0x01000000
-
-#define DAL_STATS_EVENT_ENTRIES_DEFAULT                0x00000100
-
-#define MOD_STATS_NUM_VSYNCS                   5
-#define MOD_STATS_EVENT_STRING_MAX             512
-
-struct stats_time_cache {
-       unsigned int entry_id;
-
-       unsigned long flip_timestamp_in_ns;
-       unsigned long vupdate_timestamp_in_ns;
-
-       unsigned int render_time_in_us;
-       unsigned int avg_render_time_in_us_last_ten;
-       unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
-       unsigned int num_vsync_between_flips;
-
-       unsigned int flip_to_vsync_time_in_us;
-       unsigned int vsync_to_flip_time_in_us;
-
-       unsigned int min_window;
-       unsigned int max_window;
-       unsigned int v_total_min;
-       unsigned int v_total_max;
-       unsigned int event_triggers;
-
-       unsigned int lfc_mid_point_in_us;
-       unsigned int num_frames_inserted;
-       unsigned int inserted_duration_in_us;
-
-       unsigned int flags;
-};
-
-struct stats_event_cache {
-       unsigned int entry_id;
-       char event_string[MOD_STATS_EVENT_STRING_MAX];
-};
-
-struct core_stats {
-       struct mod_stats public;
-       struct dc *dc;
-
-       bool enabled;
-       unsigned int entries;
-       unsigned int event_entries;
-       unsigned int entry_id;
-
-       struct stats_time_cache *time;
-       unsigned int index;
-
-       struct stats_event_cache *events;
-       unsigned int event_index;
-
-};
-
-#define MOD_STATS_TO_CORE(mod_stats)\
-               container_of(mod_stats, struct core_stats, public)
-
-bool mod_stats_init(struct mod_stats *mod_stats)
-{
-       bool result = false;
-       struct core_stats *core_stats = NULL;
-       struct dc *dc = NULL;
-
-       if (mod_stats == NULL)
-               return false;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-       dc = core_stats->dc;
-
-       return result;
-}
-
-struct mod_stats *mod_stats_create(struct dc *dc)
-{
-       struct core_stats *core_stats = NULL;
-       struct persistent_data_flag flag;
-       unsigned int reg_data;
-       int i = 0;
-
-       if (dc == NULL)
-               goto fail_construct;
-
-       core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
-
-       if (core_stats == NULL)
-               goto fail_construct;
-
-       core_stats->dc = dc;
-
-       core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
-       if (dm_read_persistent_data(dc->ctx, NULL, NULL,
-                       DAL_STATS_ENABLE_REGKEY,
-                       &reg_data, sizeof(unsigned int), &flag))
-               core_stats->enabled = reg_data;
-
-       if (core_stats->enabled) {
-               core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
-               if (dm_read_persistent_data(dc->ctx, NULL, NULL,
-                               DAL_STATS_ENTRIES_REGKEY,
-                               &reg_data, sizeof(unsigned int), &flag)) {
-                       if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
-                               core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
-                       else
-                               core_stats->entries = reg_data;
-               }
-               core_stats->time = kcalloc(core_stats->entries,
-                                               sizeof(struct stats_time_cache),
-                                               GFP_KERNEL);
-
-               if (core_stats->time == NULL)
-                       goto fail_construct_time;
-
-               core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
-               core_stats->events = kcalloc(core_stats->event_entries,
-                                            sizeof(struct stats_event_cache),
-                                            GFP_KERNEL);
-
-               if (core_stats->events == NULL)
-                       goto fail_construct_events;
-
-       } else {
-               core_stats->entries = 0;
-       }
-
-       /* Purposely leave index 0 unused so we don't need special logic to
-        * handle calculation cases that depend on previous flip data.
-        */
-       core_stats->index = 1;
-       core_stats->event_index = 0;
-
-       // Keeps track of ordering within the different stats structures
-       core_stats->entry_id = 0;
-
-       return &core_stats->public;
-
-fail_construct_events:
-       kfree(core_stats->time);
-
-fail_construct_time:
-       kfree(core_stats);
-
-fail_construct:
-       return NULL;
-}
-
-void mod_stats_destroy(struct mod_stats *mod_stats)
-{
-       if (mod_stats != NULL) {
-               struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-               kfree(core_stats->time);
-               kfree(core_stats->events);
-               kfree(core_stats);
-       }
-}
-
-void mod_stats_dump(struct mod_stats *mod_stats)
-{
-       struct dc  *dc = NULL;
-       struct dal_logger *logger = NULL;
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       struct stats_event_cache *events = NULL;
-       unsigned int time_index = 1;
-       unsigned int event_index = 0;
-       unsigned int index = 0;
-       struct log_entry log_entry;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-       dc = core_stats->dc;
-       logger = dc->ctx->logger;
-       time = core_stats->time;
-       events = core_stats->events;
-
-       DISPLAY_STATS_BEGIN(log_entry);
-
-       DISPLAY_STATS("==Display Caps==\n");
-
-       DISPLAY_STATS("==Display Stats==\n");
-
-       DISPLAY_STATS("%10s %10s %10s %10s %10s"
-                       " %11s %11s %17s %10s %14s"
-                       " %10s %10s %10s %10s %10s"
-                       " %10s %10s %10s %10s\n",
-               "render", "avgRender",
-               "minWindow", "midPoint", "maxWindow",
-               "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
-               "numFrame", "insertDuration",
-               "vTotalMin", "vTotalMax", "eventTrigs",
-               "vSyncTime1", "vSyncTime2", "vSyncTime3",
-               "vSyncTime4", "vSyncTime5", "flags");
-
-       for (int i = 0; i < core_stats->entry_id; i++) {
-               if (event_index < core_stats->event_index &&
-                               i == events[event_index].entry_id) {
-                       DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
-                       event_index++;
-               } else if (time_index < core_stats->index &&
-                               i == time[time_index].entry_id) {
-                       DISPLAY_STATS("%10u %10u %10u %10u %10u"
-                                       " %11u %11u %17u %10u %14u"
-                                       " %10u %10u %10u %10u %10u"
-                                       " %10u %10u %10u %10u\n",
-                               time[time_index].render_time_in_us,
-                               time[time_index].avg_render_time_in_us_last_ten,
-                               time[time_index].min_window,
-                               time[time_index].lfc_mid_point_in_us,
-                               time[time_index].max_window,
-                               time[time_index].vsync_to_flip_time_in_us,
-                               time[time_index].flip_to_vsync_time_in_us,
-                               time[time_index].num_vsync_between_flips,
-                               time[time_index].num_frames_inserted,
-                               time[time_index].inserted_duration_in_us,
-                               time[time_index].v_total_min,
-                               time[time_index].v_total_max,
-                               time[time_index].event_triggers,
-                               time[time_index].v_sync_time_in_us[0],
-                               time[time_index].v_sync_time_in_us[1],
-                               time[time_index].v_sync_time_in_us[2],
-                               time[time_index].v_sync_time_in_us[3],
-                               time[time_index].v_sync_time_in_us[4],
-                               time[time_index].flags);
-
-                       time_index++;
-               }
-       }
-
-       DISPLAY_STATS_END(log_entry);
-}
-
-void mod_stats_reset_data(struct mod_stats *mod_stats)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       memset(core_stats->time, 0,
-               sizeof(struct stats_time_cache) * core_stats->entries);
-
-       memset(core_stats->events, 0,
-               sizeof(struct stats_event_cache) * core_stats->event_entries);
-
-       core_stats->index = 1;
-       core_stats->event_index = 0;
-
-       // Keeps track of ordering within the different stats structures
-       core_stats->entry_id = 0;
-}
-
-void mod_stats_update_event(struct mod_stats *mod_stats,
-               char *event_string,
-               unsigned int length)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_event_cache *events = NULL;
-       unsigned int index = 0;
-       unsigned int copy_length = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->event_index >= core_stats->event_entries)
-               return;
-
-       events = core_stats->events;
-       index = core_stats->event_index;
-
-       copy_length = length;
-       if (length > MOD_STATS_EVENT_STRING_MAX)
-               copy_length = MOD_STATS_EVENT_STRING_MAX;
-
-       memcpy(&events[index].event_string, event_string, copy_length);
-       events[index].event_string[copy_length - 1] = '\0';
-
-       events[index].entry_id = core_stats->entry_id;
-       core_stats->event_index++;
-       core_stats->entry_id++;
-}
-
-void mod_stats_update_flip(struct mod_stats *mod_stats,
-               unsigned long timestamp_in_ns)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-
-       time[index].flip_timestamp_in_ns = timestamp_in_ns;
-       time[index].render_time_in_us =
-               (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
-
-       if (index >= 10) {
-               for (unsigned int i = 0; i < 10; i++)
-                       time[index].avg_render_time_in_us_last_ten +=
-                                       time[index - i].render_time_in_us;
-               time[index].avg_render_time_in_us_last_ten /= 10;
-       }
-
-       if (time[index].num_vsync_between_flips > 0)
-               time[index].vsync_to_flip_time_in_us =
-                       (timestamp_in_ns -
-                               time[index].vupdate_timestamp_in_ns) / 1000;
-       else
-               time[index].vsync_to_flip_time_in_us =
-                       (timestamp_in_ns -
-                               time[index - 1].vupdate_timestamp_in_ns) / 1000;
-
-       time[index].entry_id = core_stats->entry_id;
-       core_stats->index++;
-       core_stats->entry_id++;
-}
-
-void mod_stats_update_vupdate(struct mod_stats *mod_stats,
-               unsigned long timestamp_in_ns)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-       unsigned int num_vsyncs = 0;
-       unsigned int prev_vsync_in_ns = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-       num_vsyncs = time[index].num_vsync_between_flips;
-
-       if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
-               if (num_vsyncs == 0) {
-                       prev_vsync_in_ns =
-                               time[index - 1].vupdate_timestamp_in_ns;
-
-                       time[index].flip_to_vsync_time_in_us =
-                               (timestamp_in_ns -
-                                       time[index - 1].flip_timestamp_in_ns) /
-                                       1000;
-               } else {
-                       prev_vsync_in_ns =
-                               time[index].vupdate_timestamp_in_ns;
-               }
-
-               time[index].v_sync_time_in_us[num_vsyncs] =
-                       (timestamp_in_ns - prev_vsync_in_ns) / 1000;
-       }
-
-       time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
-       time[index].num_vsync_between_flips++;
-}
-
-void mod_stats_update_freesync(struct mod_stats *mod_stats,
-               unsigned int v_total_min,
-               unsigned int v_total_max,
-               unsigned int event_triggers,
-               unsigned int window_min,
-               unsigned int window_max,
-               unsigned int lfc_mid_point_in_us,
-               unsigned int inserted_frames,
-               unsigned int inserted_duration_in_us)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-
-       time[index].v_total_min = v_total_min;
-       time[index].v_total_max = v_total_max;
-       time[index].event_triggers = event_triggers;
-       time[index].min_window = window_min;
-       time[index].max_window = window_max;
-       time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
-       time[index].num_frames_inserted = inserted_frames;
-       time[index].inserted_duration_in_us = inserted_duration_in_us;
-}
-
index e7db6f9f9c865f017b7b7e4e447c74c915b3dabd..8b0b9a2a8fed662b590209b72d1c22d43439fb98 100644 (file)
 #define GRBM_PWR_CNTL__ALL_REQ_EN_MASK                                                                        0x00008000L
 //GRBM_STATUS
 #define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT                                                            0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT                                                                   0x5
 #define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT                                                            0x7
 #define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT                                                            0x8
 #define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT                                                                0x9
 #define GRBM_STATUS__CB_BUSY__SHIFT                                                                           0x1e
 #define GRBM_STATUS__GUI_ACTIVE__SHIFT                                                                        0x1f
 #define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK                                                              0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK                                                                     0x00000020L
 #define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK                                                              0x00000080L
 #define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK                                                              0x00000100L
 #define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK                                                                  0x00000200L
 #define GRBM_READ_ERROR__READ_ERROR_MASK                                                                      0x80000000L
 //GRBM_READ_ERROR2
 #define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT                                                           0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT                                                          0x11
 #define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT                                                           0x12
 #define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT                                                       0x13
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT                                                   0x14
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT                                                      0x1e
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT                                                      0x1f
 #define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK                                                             0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK                                                            0x00020000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK                                                             0x00040000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK                                                         0x00080000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK                                                     0x00100000L
index 68d0ffad28c7dd6716e02a2c1a9b79c452e8fc62..92fd27c26a77b1e16605d99669d1b2e2b3c827d8 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  0
 #define mmRCC_CONFIG_RESERVED                                                                          0x0de4 // duplicate 
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x0de5 // duplicate 
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             0
+#endif
 
 
 // addressBlock: syshub_mmreg_ind_syshubdec
index 435462294fbc514b343dd709c3d5cd55cff7034f..a7cd760ebf8f14edaa72462245d35ddee8718b90 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
index ce5830ebe095ab6635971041fcc16e8ca4fbe4ba..0c5a08bc034a6b16422da9925c6273184a8a5040 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
new file mode 100644 (file)
index 0000000..e87c359
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_OFFSET_HEADER
+#define _pwr_10_0_OFFSET_HEADER
+
+#define mmPWR_MISC_CNTL_STATUS                                                                         0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                                                0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
new file mode 100644 (file)
index 0000000..8a000c2
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_SH_MASK_HEADER
+#define _pwr_10_0_SH_MASK_HEADER
+
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT                                                      0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT                                                        0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK                                                        0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK                                                          0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
new file mode 100644 (file)
index 0000000..9bf7328
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_OFFSET_HEADER
+#define _smuio_12_0_0_OFFSET_HEADER
+
+#define mmSMUIO_GFX_MISC_CNTL                                                                          0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                                                                 0
+
+#define mmPWR_MISC_CNTL_STATUS                                                                         0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                                                1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
new file mode 100644 (file)
index 0000000..26556fa
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_SH_MASK_HEADER
+#define _smuio_12_0_0_SH_MASK_HEADER
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK                                                           0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT                                                         0x1
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT                                                      0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT                                                        0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK                                                        0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK                                                          0x00000006L
+
+#endif
index 70146518174cd865acfd23fbbe6ec813ddb579e9..b36ea8340afa604eace3a1354862ff8a428879c4 100644 (file)
@@ -972,11 +972,13 @@ struct atom_ext_display_path
 };
 
 //usCaps
-enum ext_display_path_cap_def
-{
-  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE               =0x0001,
-  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN             =0x0002,
-  EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK              =0x007C,           
+enum ext_display_path_cap_def {
+       EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =           0x0001,
+       EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =         0x0002,
+       EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =          0x007C,
+       EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 =      (0x01 << 2), //PI redriver chip
+       EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
+       EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 =    (0x03 << 2)  //Parade DP->HDMI recoverter chip
 };
 
 struct atom_external_display_connection_info
@@ -1876,6 +1878,108 @@ struct atom_smc_dpm_info_v4_6
   uint32_t   boardreserved[10];
 };
 
+struct atom_smc_dpm_info_v4_7
+{
+  struct   atom_common_table_header  table_header;
+    // SECTION: BOARD PARAMETERS
+    // I2C Control
+  struct smudpm_i2c_controller_config_v2  I2cControllers[8];
+
+  // SVI2 Board Parameters
+  uint16_t     MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+  uint16_t     MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+  uint8_t      VddGfxVrMapping;   // Use VR_MAPPING* bitfields
+  uint8_t      VddSocVrMapping;   // Use VR_MAPPING* bitfields
+  uint8_t      VddMem0VrMapping;  // Use VR_MAPPING* bitfields
+  uint8_t      VddMem1VrMapping;  // Use VR_MAPPING* bitfields
+
+  uint8_t      GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+  uint8_t      SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+  uint8_t      ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+  uint8_t      Padding8_V;
+
+  // Telemetry Settings
+  uint16_t     GfxMaxCurrent;   // in Amps
+  uint8_t      GfxOffset;       // in Amps
+  uint8_t      Padding_TelemetryGfx;
+  uint16_t     SocMaxCurrent;   // in Amps
+  uint8_t      SocOffset;       // in Amps
+  uint8_t      Padding_TelemetrySoc;
+
+  uint16_t     Mem0MaxCurrent;   // in Amps
+  uint8_t      Mem0Offset;       // in Amps
+  uint8_t      Padding_TelemetryMem0;
+
+  uint16_t     Mem1MaxCurrent;   // in Amps
+  uint8_t      Mem1Offset;       // in Amps
+  uint8_t      Padding_TelemetryMem1;
+
+  // GPIO Settings
+  uint8_t      AcDcGpio;        // GPIO pin configured for AC/DC switching
+  uint8_t      AcDcPolarity;    // GPIO polarity for AC/DC switching
+  uint8_t      VR0HotGpio;      // GPIO pin configured for VR0 HOT event
+  uint8_t      VR0HotPolarity;  // GPIO polarity for VR0 HOT event
+
+  uint8_t      VR1HotGpio;      // GPIO pin configured for VR1 HOT event
+  uint8_t      VR1HotPolarity;  // GPIO polarity for VR1 HOT event
+  uint8_t      GthrGpio;        // GPIO pin configured for GTHR Event
+  uint8_t      GthrPolarity;    // replace GPIO polarity for GTHR
+
+  // LED Display Settings
+  uint8_t      LedPin0;         // GPIO number for LedPin[0]
+  uint8_t      LedPin1;         // GPIO number for LedPin[1]
+  uint8_t      LedPin2;         // GPIO number for LedPin[2]
+  uint8_t      padding8_4;
+
+  // GFXCLK PLL Spread Spectrum
+  uint8_t      PllGfxclkSpreadEnabled;   // on or off
+  uint8_t      PllGfxclkSpreadPercent;   // Q4.4
+  uint16_t     PllGfxclkSpreadFreq;      // kHz
+
+  // GFXCLK DFLL Spread Spectrum
+  uint8_t      DfllGfxclkSpreadEnabled;   // on or off
+  uint8_t      DfllGfxclkSpreadPercent;   // Q4.4
+  uint16_t     DfllGfxclkSpreadFreq;      // kHz
+
+  // UCLK Spread Spectrum
+  uint8_t      UclkSpreadEnabled;   // on or off
+  uint8_t      UclkSpreadPercent;   // Q4.4
+  uint16_t     UclkSpreadFreq;      // kHz
+
+  // SOCCLK Spread Spectrum
+  uint8_t      SoclkSpreadEnabled;   // on or off
+  uint8_t      SocclkSpreadPercent;   // Q4.4
+  uint16_t     SocclkSpreadFreq;      // kHz
+
+  // Total board power
+  uint16_t     TotalBoardPower;     //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+  uint16_t     BoardPadding;
+
+  // Mvdd Svi2 Div Ratio Setting
+  uint32_t     MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
+
+  // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+  uint8_t      GpioI2cScl;          // Serial Clock
+  uint8_t      GpioI2cSda;          // Serial Data
+  uint16_t     GpioPadding;
+
+  // Additional LED Display Settings
+  uint8_t      LedPin3;         // GPIO number for LedPin[3] - PCIE GEN Speed
+  uint8_t      LedPin4;         // GPIO number for LedPin[4] - PMFW Error Status
+  uint16_t     LedEnableMask;
+
+  // Power Limit Scalars
+  uint8_t      PowerLimitScalar[4];    //[PPT_THROTTLER_COUNT]
+
+  uint8_t      MvddUlvPhaseSheddingMask;
+  uint8_t      VddciUlvPhaseSheddingMask;
+  uint8_t      Padding8_Psi1;
+  uint8_t      Padding8_Psi2;
+
+  uint32_t     BoardReserved[5];
+};
+
 /* 
   ***************************************************************************
     Data Table asic_profiling_info  structure
index a69deb3a2ac07a8b3e0db851dc0ccdec6b00d351..60a6536ff656d70a4ade58c3a84c57798157415c 100644 (file)
@@ -32,7 +32,6 @@ struct cgs_device;
  * enum cgs_ind_reg - Indirect register spaces
  */
 enum cgs_ind_reg {
-       CGS_IND_REG__MMIO,
        CGS_IND_REG__PCIE,
        CGS_IND_REG__SMC,
        CGS_IND_REG__UVD_CTX,
index 2a12614a12c224d9c7f9c3d532d91a20f8024e4e..7e6dcdf7df73a83d37da79af18fe516205d84a57 100644 (file)
@@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
        hwmgr->not_vf = !amdgpu_sriov_vf(adev);
        hwmgr->device = amdgpu_cgs_create_device(adev);
        mutex_init(&hwmgr->smu_lock);
+       mutex_init(&hwmgr->msg_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
        hwmgr->feature_mask = adev->pm.pp_feature;
@@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
 {
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 
+       mutex_destroy(&hwmgr->msg_lock);
+
        kfree(hwmgr->hardcode_pp_table);
        hwmgr->hardcode_pp_table = NULL;
 
@@ -319,12 +322,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
-                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                                               AMD_IP_BLOCK_TYPE_GFX,
-                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                        AMD_IP_BLOCK_TYPE_GFX,
                                        AMD_PG_STATE_UNGATE);
+                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
@@ -1435,7 +1438,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->get_asic_baco_capability)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1452,8 +1456,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!(hwmgr->not_vf && amdgpu_dpm) ||
-               !hwmgr->hwmgr_func->get_asic_baco_state)
+       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1470,7 +1473,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->set_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
index e8b27fab6aa1d3eb66aa470942910c807e1fa22f..8c684a6e0156547df01b48abfacffee58b76a503 100644 (file)
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
 
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        size_t size = 0;
        int ret = 0, i = 0;
        uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
        uint32_t sort_feature[SMU_FEATURE_COUNT];
        uint64_t hw_feature_count = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
        uint32_t feature_low = 0, feature_high = 0;
        int ret = 0;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        feature_low = (feature_mask >> 0 ) & 0xffffffff;
        feature_high = (feature_mask >> 32) & 0xffffffff;
 
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        mutex_lock(&smu->mutex);
 
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
        if (!if_version && !smu_version)
                return -EINVAL;
 
+       if (smu->smc_fw_if_version && smu->smc_fw_version)
+       {
+               if (if_version)
+                       *if_version = smu->smc_fw_if_version;
+
+               if (smu_version)
+                       *smu_version = smu->smc_fw_version;
+
+               return 0;
+       }
+
        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;
+
+               smu->smc_fw_if_version = *if_version;
        }
 
        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;
+
+               smu->smc_fw_version = *smu_version;
        }
 
        return ret;
@@ -327,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
-                                         param, &param);
+                                         param, value);
        if (ret)
                return ret;
 
        /* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
         * now, we un-support it */
-       *value = param & 0x7fffffff;
+       *value = *value & 0x7fffffff;
 
        return ret;
 }
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -511,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
        int table_id = smu_table_get_index(smu, table_index);
        uint32_t table_size;
        int ret = 0;
-
        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;
 
@@ -547,12 +570,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_VEGA20)
                return (amdgpu_dpm == 2) ? true : false;
        else if (adev->asic_type >= CHIP_ARCTURUS) {
-               if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
-                       return false;
-               else
+             if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
                        return true;
-       } else
-               return false;
+       }
+       return false;
 }
 
 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
@@ -569,8 +590,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
        uint32_t powerplay_table_size;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;
 
@@ -591,11 +616,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;
 
-       if (!smu->pm_enabled)
+       if (!adev->pm.dpm_enabled)
                return -EINVAL;
+
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched !\n");
                return -EIO;
@@ -636,8 +663,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
-       if (!smu->pm_enabled)
-               return ret;
        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
@@ -665,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 
        if (smu->is_apu)
                return 1;
-
        feature_id = smu_feature_get_index(smu, mask);
        if (feature_id < 0)
                return 0;
@@ -932,13 +956,6 @@ static int smu_sw_init(void *handle)
                return ret;
        }
 
-       if (adev->smu.ppt_funcs->i2c_eeprom_init) {
-               ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -948,9 +965,6 @@ static int smu_sw_fini(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
-       if (adev->smu.ppt_funcs->i2c_eeprom_fini)
-               smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
        kfree(smu->irq_source);
        smu->irq_source = NULL;
 
@@ -1323,6 +1337,9 @@ static int smu_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
+       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+               return 0;
+
        ret = smu_start_smc_engine(smu);
        if (ret) {
                pr_err("SMU is not ready yet!\n");
@@ -1336,9 +1353,6 @@ static int smu_hw_init(void *handle)
                smu_set_gfx_cgpg(&adev->smu, true);
        }
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        if (!smu->pm_enabled)
                return 0;
 
@@ -1366,10 +1380,11 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       if (!smu->pm_enabled)
-               adev->pm.dpm_enabled = false;
-       else
-               adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+       ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+       if (ret)
+               goto failed;
+
+       adev->pm.dpm_enabled = true;
 
        pr_info("SMU is initialized successfully!\n");
 
@@ -1381,6 +1396,9 @@ failed:
 
 static int smu_stop_dpms(struct smu_context *smu)
 {
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        return smu_system_features_control(smu, false);
 }
 
@@ -1403,6 +1421,10 @@ static int smu_hw_fini(void *handle)
        if (!smu->pm_enabled)
                return 0;
 
+       adev->pm.dpm_enabled = false;
+
+       smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
        if (!amdgpu_sriov_vf(adev)){
                ret = smu_stop_thermal_control(smu);
                if (ret) {
@@ -1476,7 +1498,7 @@ static int smu_disable_dpm(struct smu_context *smu)
        bool use_baco = !smu->is_apu &&
                ((adev->in_gpu_reset &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-                (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+                ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
 
        ret = smu_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
@@ -1542,6 +1564,10 @@ static int smu_suspend(void *handle)
        if (!smu->pm_enabled)
                return 0;
 
+       adev->pm.dpm_enabled = false;
+
+       smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
        if(!amdgpu_sriov_vf(adev)) {
                ret = smu_disable_dpm(smu);
                if (ret)
@@ -1587,11 +1613,17 @@ static int smu_resume(void *handle)
        if (ret)
                goto failed;
 
+       ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+       if (ret)
+               goto failed;
+
        if (smu->is_apu)
                smu_set_gfx_cgpg(&adev->smu, true);
 
        smu->disable_uclk_switch = 0;
 
+       adev->pm.dpm_enabled = true;
+
        pr_info("SMU is resumed successfully!\n");
 
        return 0;
@@ -1603,10 +1635,14 @@ failed:
 int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
 {
+       struct amdgpu_device *adev = smu->adev;
        int index = 0;
        int num_of_active_display = 0;
 
-       if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
+       if (!is_support_sw_smu(smu->adev))
                return -EINVAL;
 
        if (!display_config)
@@ -1668,12 +1704,16 @@ int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
 {
        struct amd_pp_simple_clock_info simple_clocks = {0};
+       struct amdgpu_device *adev = smu->adev;
        struct smu_clock_info hw_clocks;
        int ret = 0;
 
        if (!is_support_sw_smu(smu->adev))
                return -EINVAL;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1776,7 @@ static int smu_enable_umd_pstate(void *handle,
        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-       if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+       if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1744,12 +1784,12 @@ static int smu_enable_umd_pstate(void *handle,
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
-                       amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
+                       amdgpu_device_ip_set_clockgating_state(smu->adev,
+                                                              AMD_IP_BLOCK_TYPE_GFX,
+                                                              AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
@@ -1778,9 +1818,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-       if (!smu->pm_enabled)
-               return -EINVAL;
-
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
@@ -1831,8 +1868,12 @@ int smu_handle_task(struct smu_context *smu,
                    enum amd_pp_task task_id,
                    bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (lock_needed)
                mutex_lock(&smu->mutex);
 
@@ -1866,10 +1907,11 @@ int smu_switch_power_profile(struct smu_context *smu,
                             bool en)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        long workload;
        uint32_t index;
 
-       if (!smu->pm_enabled)
+       if (!adev->pm.dpm_enabled)
                return -EINVAL;
 
        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1942,12 @@ int smu_switch_power_profile(struct smu_context *smu,
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        enum amd_dpm_forced_level level;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
@@ -1915,8 +1961,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
@@ -1939,8 +1989,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
 
 int smu_set_display_count(struct smu_context *smu, uint32_t count)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);
@@ -1954,8 +2008,12 @@ int smu_force_clk_levels(struct smu_context *smu,
                         bool lock_needed)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                pr_debug("force clock level is for dpm manual mode only.\n");
                return -EINVAL;
@@ -1973,20 +2031,19 @@ int smu_force_clk_levels(struct smu_context *smu,
        return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ * However, the mp1 state setting should still be granted
+ * even if the dpm_enabled cleared.
+ */
 int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state)
 {
        uint16_t msg;
        int ret;
 
-       /*
-        * The SMC is not fully ready. That may be
-        * expected as the IP may be masked.
-        * So, just return without error.
-        */
-       if (!smu->pm_enabled)
-               return 0;
-
        mutex_lock(&smu->mutex);
 
        switch (mp1_state) {
@@ -2023,15 +2080,11 @@ int smu_set_mp1_state(struct smu_context *smu,
 int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       /*
-        * The SMC is not fully ready. That may be
-        * expected as the IP may be masked.
-        * So, just return without error.
-        */
-       if (!smu->pm_enabled)
-               return 0;
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                return 0;
@@ -2047,6 +2100,28 @@ int smu_set_df_cstate(struct smu_context *smu,
        return ret;
 }
 
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
+       if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+               return 0;
+
+       mutex_lock(&smu->mutex);
+
+       ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
+       if (ret)
+               pr_err("[AllowXgmiPowerDown] failed!\n");
+
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
+
 int smu_write_watermarks_table(struct smu_context *smu)
 {
        void *watermarks_table = smu->smu_table.watermarks_table;
@@ -2065,6 +2140,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
        void *table = smu->smu_table.watermarks_table;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        if (!table)
                return -EINVAL;
@@ -2089,8 +2168,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
 int smu_set_ac_dc(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        /* controlled by firmware */
        if (smu->dc_controlled_by_gpio)
                return 0;
@@ -2149,8 +2232,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
 
 int smu_load_microcode(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2250,12 @@ int smu_load_microcode(struct smu_context *smu)
 
 int smu_check_fw_status(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2282,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2303,15 @@ int smu_get_power_limit(struct smu_context *smu,
                        bool def,
                        bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (lock_needed)
+       if (lock_needed) {
+               if (!adev->pm.dpm_enabled)
+                       return -EINVAL;
+
                mutex_lock(&smu->mutex);
+       }
 
        if (smu->ppt_funcs->get_power_limit)
                ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2324,12 @@ int smu_get_power_limit(struct smu_context *smu,
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2342,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2360,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
 
 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2378,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 
 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2398,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
                          enum PP_OD_DPM_TABLE_COMMAND type,
                          long *input, uint32_t size)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
                    enum amd_pp_sensors sensor,
                    void *data, uint32_t *size)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2436,12 @@ int smu_read_sensor(struct smu_context *smu,
 
 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2457,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
                               uint32_t param_size,
                               bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (lock_needed)
                mutex_lock(&smu->mutex);
 
@@ -2346,8 +2478,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
 
 int smu_get_fan_control_mode(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2496,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
 
 int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2514,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2532,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2550,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2568,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2586,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 
 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (smu->ppt_funcs->set_active_display_count)
                ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
@@ -2442,8 +2602,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
                          struct amd_pp_clocks *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2621,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
 int smu_get_max_high_clocks(struct smu_context *smu,
                            struct amd_pp_simple_clock_info *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2641,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                       enum smu_clk_type clk_type,
                                       struct pp_clock_levels_with_latency *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2661,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                       enum amd_pp_clock_type type,
                                       struct pp_clock_levels_with_voltage *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2681,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
 int smu_display_clock_voltage_request(struct smu_context *smu,
                                      struct pp_display_clock_request *clock_req)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2700,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 
 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = -EINVAL;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2718,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
 
 int smu_notify_smu_enable_pwe(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2737,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
 int smu_set_xgmi_pstate(struct smu_context *smu,
                        uint32_t pstate)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2755,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
 
 int smu_set_azalia_d3_pme(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2771,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
        return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ *
+ * However, the baco/mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
 bool smu_baco_is_support(struct smu_context *smu)
 {
        bool ret = false;
@@ -2646,8 +2850,12 @@ int smu_mode2_reset(struct smu_context *smu)
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2870,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
                            unsigned int *clock_values_in_khz,
                            unsigned int *num_states)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2889,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 {
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        mutex_lock(&smu->mutex);
 
@@ -2691,8 +2907,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 int smu_get_dpm_clock_table(struct smu_context *smu,
                            struct dpm_clocks *clock_table)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_dpm_clock_table)
index 1ef0923f7190676d26efcfb882a730d7c7b0e13b..cbf70122de9bd41099e6de1a127fff20c3ceacb3 100644 (file)
@@ -128,6 +128,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetXgmiMode,                         PPSMC_MSG_SetXgmiMode),
        MSG_MAP(SetMemoryChannelEnable,              PPSMC_MSG_SetMemoryChannelEnable),
        MSG_MAP(DFCstateControl,                     PPSMC_MSG_DFCstateControl),
+       MSG_MAP(GmiPwrDnControl,                     PPSMC_MSG_GmiPwrDnControl),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -2226,12 +2227,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
 static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct smu_context *smu = &adev->smu;
        int res;
 
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2244,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 
 static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->pm_enabled)
-               return;
-
        i2c_del_adapter(control);
 }
 
@@ -2261,7 +2252,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (!smu_v11_0_baco_is_support(smu))
+       if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
                return false;
 
        val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -2296,6 +2287,35 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
        return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
+static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+       uint32_t smu_version;
+       int ret;
+
+       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               pr_err("Failed to get smu version!\n");
+               return ret;
+       }
+
+       /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
+       if (smu_version < 0x00361700) {
+               pr_err("XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
+               return -EINVAL;
+       }
+
+       if (en)
+               return smu_send_smc_msg_with_param(smu,
+                                                  SMU_MSG_GmiPwrDnControl,
+                                                  1,
+                                                  NULL);
+
+       return smu_send_smc_msg_with_param(smu,
+                                          SMU_MSG_GmiPwrDnControl,
+                                          0,
+                                          NULL);
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
        /* translate smu index into arcturus specific index */
        .get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2389,6 +2409,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
        .get_pptable_power_limit = arcturus_get_pptable_power_limit,
        .set_df_cstate = arcturus_set_df_cstate,
+       .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
index 77c14671866c07f159729b82e31ec1779b7d4f45..719597c5d27d98e070b202fffd31d4413b994fa9 100644 (file)
@@ -984,6 +984,32 @@ static int init_thermal_controller(
                        struct pp_hwmgr *hwmgr,
                        const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
 {
+       hwmgr->thermal_controller.ucType =
+                       powerplay_table->sThermalController.ucType;
+       hwmgr->thermal_controller.ucI2cLine =
+                       powerplay_table->sThermalController.ucI2cLine;
+       hwmgr->thermal_controller.ucI2cAddress =
+                       powerplay_table->sThermalController.ucI2cAddress;
+
+       hwmgr->thermal_controller.fanInfo.bNoFan =
+               (0 != (powerplay_table->sThermalController.ucFanParameters &
+                       ATOM_PP_FANPARAMETERS_NOFAN));
+
+       hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+               powerplay_table->sThermalController.ucFanParameters &
+               ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+       hwmgr->thermal_controller.fanInfo.ulMinRPM
+               = powerplay_table->sThermalController.ucFanMinRPM * 100UL;
+       hwmgr->thermal_controller.fanInfo.ulMaxRPM
+               = powerplay_table->sThermalController.ucFanMaxRPM * 100UL;
+
+       set_hw_cap(hwmgr,
+                  ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+                  PHM_PlatformCaps_ThermalController);
+
+       hwmgr->thermal_controller.use_hw_fan_control = 1;
+
        return 0;
 }
 
index 689072a312a7fb52a015eedf4bd9d53d9720d029..4f8c1b85e68822f3e75ab078581980403901b24a 100644 (file)
@@ -36,6 +36,8 @@
 #include "power_state.h"
 #include "soc15_common.h"
 #include "smu10.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
 
 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
 #define SMC_RAM_END                     0x40000
 
-#define mmPWR_MISC_CNTL_STATUS                                 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT       0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT         0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK         0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK           0x00000006L
-
 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 
 
@@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
                return -EINVAL;
        }
-       smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+       smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
 
        return 0;
 }
@@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
                smu10_data->deep_sleep_dcefclk = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       smu10_data->deep_sleep_dcefclk);
+                                       smu10_data->deep_sleep_dcefclk,
+                                       NULL);
        }
        return 0;
 }
@@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
                smu10_data->dcf_actual_hard_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinDcefclkByFreq,
-                                       smu10_data->dcf_actual_hard_min_freq);
+                                       smu10_data->dcf_actual_hard_min_freq,
+                                       NULL);
        }
        return 0;
 }
@@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
                smu10_data->f_actual_hard_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinFclkByFreq,
-                                       smu10_data->f_actual_hard_min_freq);
+                                       smu10_data->f_actual_hard_min_freq,
+                                       NULL);
        }
        return 0;
 }
@@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count
                smu10_data->num_active_display = count;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetDisplayCount,
-                               smu10_data->num_active_display);
+                               smu10_data->num_active_display,
+                               NULL);
        }
 
        return 0;
@@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
        if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                           PPSMC_MSG_SetGfxCGPG,
-                                                          true);
+                                                          true,
+                                                          NULL);
        else
                return 0;
 }
@@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
 
                /* confirm gfx is back to "on" state */
                while (!smu10_is_gfx_on(hwmgr))
@@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
 
        return 0;
 }
@@ -479,12 +479,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
                                        ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
-       result = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
        smu10_data->gfx_min_freq_limit = result / 10 * 1000;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
-       result = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
        smu10_data->gfx_max_freq_limit = result / 10 * 1000;
 
        return 0;
@@ -588,116 +586,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               SMU10_UMD_PSTATE_GFXCLK);
+                                               SMU10_UMD_PSTATE_GFXCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               SMU10_UMD_PSTATE_FCLK);
+                                               SMU10_UMD_PSTATE_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_SOCCLK);
+                                               SMU10_UMD_PSTATE_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               SMU10_UMD_PSTATE_GFXCLK);
+                                               SMU10_UMD_PSTATE_GFXCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_FCLK);
+                                               SMU10_UMD_PSTATE_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_SOCCLK);
+                                               SMU10_UMD_PSTATE_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
                                                SMU10_UMD_PSTATE_PEAK_FCLK :
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_MIN_SOCCLK);
+                                               SMU10_UMD_PSTATE_MIN_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_MIN_VCE);
+                                               SMU10_UMD_PSTATE_MIN_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -849,13 +879,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                low == 2 ? data->gfx_max_freq_limit/100 :
                                                low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
                                                high == 0 ? data->gfx_min_freq_limit/100 :
                                                high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                break;
 
        case PP_MCLK:
@@ -864,11 +896,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               mclk_table->entries[low].clk/100);
+                                               mclk_table->entries[low].clk/100,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               mclk_table->entries[high].clk/100);
+                                               mclk_table->entries[high].clk/100,
+                                               NULL);
                break;
 
        case PP_PCIE:
@@ -888,8 +922,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        switch (type) {
        case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
 
        /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
                if (now == data->gfx_max_freq_limit/100)
@@ -910,8 +943,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        i == 2 ? "*" : "");
                break;
        case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
 
                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -1122,15 +1154,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
-               sclk = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
                        /* in units of 10KHZ */
                *((uint32_t *)value) = sclk * 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
-               mclk = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
                        /* in units of 10KHZ */
                *((uint32_t *)value) = mclk * 100;
                *size = 4;
@@ -1166,20 +1196,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
 {
 
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
 }
 
 static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
 }
 
 static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 {
        if (gate)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
        else
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
 }
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
@@ -1191,11 +1221,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_GATE);
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_PowerDownVcn, 0);
+                                       PPSMC_MSG_PowerDownVcn, 0, NULL);
                smu10_data->vcn_power_gated = true;
        } else {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_PowerUpVcn, 0);
+                                               PPSMC_MSG_PowerUpVcn, 0, NULL);
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_UNGATE);
@@ -1304,7 +1334,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
                hwmgr->gfxoff_state_changed_by_workload = true;
        }
        result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
-                                               1 << workload_type);
+                                               1 << workload_type,
+                                               NULL);
        if (!result)
                hwmgr->power_profile_mode = input[size];
        if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
@@ -1319,13 +1350,13 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                   PPSMC_MSG_DeviceDriverReset,
-                                                  mode);
+                                                  mode,
+                                                  NULL);
 }
 
 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .backend_init = smu10_hwmgr_backend_init,
        .backend_fini = smu10_hwmgr_backend_fini,
-       .asic_setup = NULL,
        .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
        .force_dpm_level = smu10_dpm_force_dpm_level,
        .get_power_state_size = smu10_get_power_state_size,
index 683b29a993666513d0330291ef80a11522df381a..f2bda3bcbbde23b1b5963b8f7e3a45b160070122 100644 (file)
@@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr, enable ?
                        PPSMC_MSG_UVDDPM_Enable :
-                       PPSMC_MSG_UVDDPM_Disable);
+                       PPSMC_MSG_UVDDPM_Disable,
+                       NULL);
 }
 
 static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr, enable ?
                        PPSMC_MSG_VCEDPM_Enable :
-                       PPSMC_MSG_VCEDPM_Disable);
+                       PPSMC_MSG_VCEDPM_Disable,
+                       NULL);
 }
 
 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
@@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_uvd_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_UVDPowerOFF);
+                               PPSMC_MSG_UVDPowerOFF,
+                               NULL);
        return 0;
 }
 
@@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                  PHM_PlatformCaps_UVDDynamicPowerGating)) {
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_UVDPowerON, 1);
+                                       PPSMC_MSG_UVDPowerON, 1, NULL);
                } else {
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_UVDPowerON, 0);
+                                       PPSMC_MSG_UVDPowerON, 0, NULL);
                }
        }
 
@@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_VCEPowerOFF);
+                               PPSMC_MSG_VCEPowerOFF,
+                               NULL);
        return 0;
 }
 
@@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_VCEPowerON);
+                               PPSMC_MSG_VCEPowerON,
+                               NULL);
        return 0;
 }
 
@@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_3DCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_3DLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_RLC_LS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CP_LS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                                CG_GFX_OTHERS_MGCG_MASK);
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_BIF_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if  (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_BIF_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_MC_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_MC_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_DRM_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_DRM_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_HDP_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_HDP_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_SDMA_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_SDMA_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_ROM_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
        if (enable)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_GFX_CU_PG_ENABLE,
-                                       adev->gfx.cu_info.number);
+                                       adev->gfx.cu_info.number,
+                                       NULL);
        else
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GFX_CU_PG_DISABLE);
+                               PPSMC_MSG_GFX_CU_PG_DISABLE,
+                               NULL);
 }
index 7740488999df78b06326715cc24d580d35286721..753cb2cf6b77e668fd4ec93f3eb0e7ce709b0563 100644 (file)
@@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
        }
 
        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
 
        return 0;
 }
@@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
 
 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
 }
 
 /**
@@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_EnableVRHotGPIOInterrupt);
+                               PPSMC_MSG_EnableVRHotGPIOInterrupt,
+                               NULL);
 
        return 0;
 }
@@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        if (data->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
 
        return 0;
 }
@@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        if (data->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
 
        return 0;
 }
@@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                               PPSMC_MSG_MASTER_DeepSleep_OFF,
+                               NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
@@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                               PPSMC_MSG_MASTER_DeepSleep_OFF,
+                               NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
@@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                        smu7_disable_sclk_vce_handshake(hwmgr);
 
                PP_ASSERT_WITH_CODE(
-               (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+               (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
                "Failed to enable SCLK DPM during DPM Start Function!",
                return -EINVAL);
        }
@@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_MCLKDPM_Enable)),
+                                               PPSMC_MSG_MCLKDPM_Enable,
+                                               NULL)),
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -EINVAL);
 
@@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
        if (0 == data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_Enable)),
+                                               PPSMC_MSG_PCIeDPM_Enable,
+                                               NULL)),
                                "Failed to enable pcie DPM during DPM Start Function!",
                                return -EINVAL);
        }
@@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_Falcon_QuickTransition)) {
                PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_EnableACDCGPIOInterrupt)),
+                               PPSMC_MSG_EnableACDCGPIOInterrupt,
+                               NULL)),
                                "Failed to enable AC DC GPIO Interrupt!",
                                );
        }
@@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable SCLK DPM when DPM is disabled",
                                return 0);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
        }
 
        /* disable MCLK dpm */
@@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable MCLK DPM when DPM is disabled",
                                return 0);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
        }
 
        return 0;
@@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
        if (!data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                                               PPSMC_MSG_PCIeDPM_Disable,
+                                               NULL) == 0),
                                "Failed to disable pcie DPM during DPM Stop Function!",
                                return -EINVAL);
        }
@@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
                        "Trying to disable voltage DPM when DPM is disabled",
                        return 0);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
 
        return 0;
 }
@@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
-       smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
+       smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
 
        tmp_result = smu7_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
                if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
                        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-                                       hwmgr, PPSMC_MSG_EnableAvfs),
+                                       hwmgr, PPSMC_MSG_EnableAvfs, NULL),
                                        "Failed to enable AVFS!",
                                        return -EINVAL);
                }
        } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
                        CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
                PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-                               hwmgr, PPSMC_MSG_DisableAvfs),
+                               hwmgr, PPSMC_MSG_DisableAvfs, NULL),
                                "Failed to disable AVFS!",
                                return -EINVAL);
        }
@@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
 
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_ForceLevel, level);
+                                               PPSMC_MSG_PCIeDPM_ForceLevel, level,
+                                               NULL);
                }
        }
 
@@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                               (1 << level));
+                                               (1 << level),
+                                               NULL);
                }
        }
 
@@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                               (1 << level));
+                                               (1 << level),
+                                               NULL);
                }
        }
 
@@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask,
+                                       NULL);
        }
 
        if (!data->mclk_dpm_key_disabled) {
                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask,
+                                       NULL);
        }
 
        return 0;
@@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 
        if (!data->pcie_dpm_key_disabled) {
                smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_PCIeDPM_UnForceLevel);
+                               PPSMC_MSG_PCIeDPM_UnForceLevel,
+                               NULL);
        }
 
        return smu7_upload_dpm_level_enable_mask(hwmgr);
@@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.sclk_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                                           (1 << level));
+                                                           (1 << level),
+                                                           NULL);
 
        }
 
@@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.mclk_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                                           (1 << level));
+                                                           (1 << level),
+                                                           NULL);
                }
        }
 
@@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.pcie_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_PCIeDPM_ForceLevel,
-                                                           (level));
+                                                           (level),
+                                                           NULL);
                }
        }
 
@@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
            (adev->asic_type != CHIP_BONAIRE) &&
            (adev->asic_type != CHIP_FIJI) &&
            (adev->asic_type != CHIP_TONGA)) {
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-               tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
                *query = tmp;
 
                if (tmp != 0)
                        return 0;
        }
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                                        ixSMU_PM_STATUS_95, 0);
 
        for (i = 0; i < 10; i++) {
                msleep(500);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
                tmp = cgs_read_ind_register(hwmgr->device,
                                                CGS_IND_REG__SMC,
                                                ixSMU_PM_STATUS_95);
@@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
-               sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
                *((uint32_t *)value) = sclk;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
-               mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
                *((uint32_t *)value) = mclk;
                *size = 4;
                return 0;
@@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to freeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SCLKDPM_FreezeLevel),
+                               PPSMC_MSG_SCLKDPM_FreezeLevel,
+                               NULL),
                                "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
                                return -EINVAL);
        }
@@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to freeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MCLKDPM_FreezeLevel),
+                               PPSMC_MSG_MCLKDPM_FreezeLevel,
+                               NULL),
                                "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
                                return -EINVAL);
        }
@@ -3804,9 +3819,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 {
        uint32_t i;
 
+       /* force the trim if mclk_switching is disabled to prevent flicker */
+       bool force_trim = (low_limit == high_limit);
        for (i = 0; i < dpm_table->count; i++) {
        /*skip the trim if od is enabled*/
-               if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+               if ((!hwmgr->od_enabled || force_trim)
+                       && (dpm_table->dpm_levels[i].value < low_limit
                        || dpm_table->dpm_levels[i].value > high_limit))
                        dpm_table->dpm_levels[i].enabled = false;
                else
@@ -3881,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to Unfreeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+                               PPSMC_MSG_SCLKDPM_UnfreezeLevel,
+                               NULL),
                        "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
                        return -EINVAL);
        }
@@ -3893,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to Unfreeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+                               PPSMC_MSG_MCLKDPM_UnfreezeLevel,
+                               NULL),
                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
                    return -EINVAL);
        }
@@ -3946,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
        if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
                if (hwmgr->chip_id == CHIP_VEGAM)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
+                                       NULL);
                else
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
+                                       NULL);
        }
-       return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
+       return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
 }
 
 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -4037,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
        advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
+                       NULL);
 }
 
 static int
@@ -4045,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
-       return (smum_send_msg_to_smc(hwmgr, msg) == 0) ?  0 : -1;
+       return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ?  0 : -1;
 }
 
 static int
@@ -4129,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
        advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
+                       NULL);
 }
 
 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
@@ -4259,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12))
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
        } else {
                data->mem_latency_high = 330;
                data->mem_latency_low = 330;
                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12))
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
        }
 
        return 0;
@@ -4410,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                if (!data->sclk_dpm_key_disabled)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
+                                       NULL);
                break;
        case PP_MCLK:
                if (!data->mclk_dpm_key_disabled)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
+                                       NULL);
                break;
        case PP_PCIE:
        {
@@ -4424,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 
                if (!data->pcie_dpm_key_disabled) {
                        if (fls(tmp) != ffs(tmp))
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
+                                               NULL);
                        else
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_PCIeDPM_ForceLevel,
-                                       fls(tmp) - 1);
+                                       fls(tmp) - 1,
+                                       NULL);
                }
                break;
        }
@@ -4454,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        switch (type) {
        case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
 
                for (i = 0; i < sclk_table->count; i++) {
                        if (clock > sclk_table->dpm_levels[i].value)
@@ -4470,8 +4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        (i == now) ? "*" : "");
                break;
        case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
 
                for (i = 0; i < mclk_table->count; i++) {
                        if (clock > mclk_table->dpm_levels[i].value)
index 58f5589aaf126add63407b058e80e7d4bd2a4319..5d4971576111e1fcbde8036a86cb81b8147e74ee 100644 (file)
@@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
        didt_block |= block_en << TCP_Enable_SHIFT;
 
        if (enable)
-               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                               PPSMC_MSG_Didt_Block_Function,
+                                               didt_block,
+                                               NULL);
 
        return result;
 }
@@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 
                if (hwmgr->chip_id == CHIP_POLARIS11) {
                        result = smum_send_msg_to_smc(hwmgr,
-                                               (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+                                               (uint16_t)(PPSMC_MSG_EnableDpmDidt),
+                                               NULL);
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to enable DPM DIDT.", goto error);
                }
@@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
                                goto error);
                if (hwmgr->chip_id == CHIP_POLARIS11) {
                        result = smum_send_msg_to_smc(hwmgr,
-                                               (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+                                               (uint16_t)(PPSMC_MSG_DisableDpmDidt),
+                                               NULL);
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to disable DPM DIDT.", goto error);
                }
@@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
        if (PP_CAP(PHM_PlatformCaps_CAC)) {
                int smc_result;
                smc_result = smum_send_msg_to_smc(hwmgr,
-                               (uint16_t)(PPSMC_MSG_EnableCac));
+                               (uint16_t)(PPSMC_MSG_EnableCac),
+                               NULL);
                PP_ASSERT_WITH_CODE((0 == smc_result),
                                "Failed to enable CAC in SMC.", result = -1);
 
@@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
 
        if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
                int smc_result = smum_send_msg_to_smc(hwmgr,
-                               (uint16_t)(PPSMC_MSG_DisableCac));
+                               (uint16_t)(PPSMC_MSG_DisableCac),
+                               NULL);
                PP_ASSERT_WITH_CODE((smc_result == 0),
                                "Failed to disable CAC in SMC.", result = -1);
 
@@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
        if (data->power_containment_features &
                        POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_PkgPwrSetLimit, n<<8);
+                               PPSMC_MSG_PkgPwrSetLimit,
+                               n<<8,
+                               NULL);
        return 0;
 }
 
@@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
                                                uint32_t target_tdp)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+                       PPSMC_MSG_OverDriveSetTargetTdp,
+                       target_tdp,
+                       NULL);
 }
 
 int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
@@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
        if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
                if (data->enable_tdc_limit_feature) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((0 == smc_result),
                                        "Failed to enable TDCLimit in SMC.", result = -1;);
                        if (0 == smc_result)
@@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
 
                if (data->enable_pkg_pwr_tracking_feature) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((0 == smc_result),
                                        "Failed to enable PkgPwrTracking in SMC.", result = -1;);
                        if (0 == smc_result) {
@@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_TDCLimit) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable TDCLimit in SMC.",
                                        result = smc_result);
@@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_DTE) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                                       (uint16_t)(PPSMC_MSG_DisableDTE),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable DTE in SMC.",
                                        result = smc_result);
@@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable PkgPwrTracking in SMC.",
                                        result = smc_result);
index 5bdc0df5a9f42c0aeef45053f5849a35de963caf..0b30f73649a8384ea7fe0498d1dc2a61db995eff 100644 (file)
@@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
        int result;
 
        if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+                                       FAN_CONTROL_FUZZY, NULL);
 
                if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
                        hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
@@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
                                        advanceFanControlParameters.usMaxFanPWM);
 
        } else {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+                                       FAN_CONTROL_TABLE, NULL);
        }
 
        if (!result && hwmgr->thermal_controller.
@@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
                result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucTargetTemperature);
+                               advanceFanControlParameters.ucTargetTemperature,
+                               NULL);
        hwmgr->fan_ctrl_enabled = true;
 
        return result;
@@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
        hwmgr->fan_ctrl_enabled = false;
-       return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL);
 }
 
 /**
@@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
                        CG_THERMAL_INT, THERM_INT_MASK, alert);
 
        /* send message to SMU to enable internal thermal interrupts */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL);
 }
 
 /**
@@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
                        CG_THERMAL_INT, THERM_INT_MASK, alert);
 
        /* send message to SMU to disable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL);
 }
 
 /**
index 019d6a206492b05604e42678d5d41a87f6c4e74a..a6c6a793e98eef8d413c434ccfdd099c2d6b41a5 100644 (file)
@@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
 
        if (data->max_sclk_level == 0) {
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
-               data->max_sclk_level = smum_get_argument(hwmgr) + 1;
+               smum_send_msg_to_smc(hwmgr,
+                               PPSMC_MSG_GetMaxSclkLevel,
+                               &data->max_sclk_level);
+               data->max_sclk_level += 1;
        }
 
        return data->max_sclk_level;
@@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_uvd_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
        data->uvd_dpm.soft_min_clk = 0;
        data->uvd_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].vclk;
@@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_vce_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
        data->vce_dpm.soft_min_clk = 0;
        data->vce_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].ecclk;
@@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_acp_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.acp_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
        data->acp_dpm.soft_min_clk = 0;
        data->acp_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].acpclk;
@@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
 #ifdef CONFIG_DRM_AMD_ACP
        data->acp_power_gated = false;
 #else
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
        data->acp_power_gated = true;
 #endif
 
@@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkHardMin,
                                                 smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.hard_min_clk,
-                                            PPSMC_MSG_SetSclkHardMin));
+                                            PPSMC_MSG_SetSclkHardMin),
+                                                NULL);
        }
 
        clock = data->sclk_dpm.soft_min_clk;
@@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkSoftMin,
                                                smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_min_clk,
-                                            PPSMC_MSG_SetSclkSoftMin));
+                                            PPSMC_MSG_SetSclkSoftMin),
+                                               NULL);
        }
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkSoftMax,
                                                smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_max_clk,
-                                       PPSMC_MSG_SetSclkSoftMax));
+                                       PPSMC_MSG_SetSclkSoftMax),
+                                               NULL);
        }
 
        return 0;
@@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepSclk,
-                               clks);
+                               clks,
+                               NULL);
        }
 
        return 0;
@@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetWatermarkFrequency,
-                                       data->sclk_dpm.soft_max_clk);
+                                       data->sclk_dpm.soft_max_clk,
+                                       NULL);
 
        return 0;
 }
@@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable,
 
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_EnableLowMemoryPstate,
-                                               (lock ? 1 : 0));
+                                               (lock ? 1 : 0),
+                                               NULL);
                } else {
                        PP_DBG_LOG("disable Low Memory PState.\n");
 
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_DisableLowMemoryPstate,
-                                               (lock ? 1 : 0));
+                                               (lock ? 1 : 0),
+                                               NULL);
                }
        }
 
@@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
                ret = smum_send_msg_to_smc_with_parameter(
                                                          hwmgr,
                                                          PPSMC_MSG_DisableAllSmuFeatures,
-                                                         dpm_features);
+                                                         dpm_features,
+                                                         NULL);
                if (ret == 0)
                        data->is_nb_dpm_enabled = false;
        }
@@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
                ret = smum_send_msg_to_smc_with_parameter(
                                                          hwmgr,
                                                          PPSMC_MSG_EnableAllSmuFeatures,
-                                                         dpm_features);
+                                                         dpm_features,
+                                                         NULL);
                if (ret == 0)
                        data->is_nb_dpm_enabled = true;
        }
@@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableAllSmuFeatures,
-                               SCLK_DPM_MASK);
+                               SCLK_DPM_MASK,
+                               NULL);
 }
 
 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
                data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
                ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DisableAllSmuFeatures,
-                                       dpm_features);
+                                       dpm_features,
+                                       NULL);
        }
        return ret;
 }
@@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
                                        PPSMC_MSG_SetSclkSoftMin,
                                        smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_max_clk,
-                                       PPSMC_MSG_SetSclkSoftMin));
+                                       PPSMC_MSG_SetSclkSoftMin),
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                        PPSMC_MSG_SetSclkSoftMax,
                        smu8_get_sclk_level(hwmgr,
                        data->sclk_dpm.soft_min_clk,
-                       PPSMC_MSG_SetSclkSoftMax));
+                       PPSMC_MSG_SetSclkSoftMax),
+                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        return 0;
 }
@@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
        return 0;
 }
 
@@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
                return smum_send_msg_to_smc_with_parameter(
                        hwmgr,
                        PPSMC_MSG_UVDPowerON,
-                       PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+                       PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
+                       NULL);
        }
 
        return 0;
@@ -1259,15 +1281,20 @@ static int  smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                        PPSMC_MSG_SetEclkHardMin,
                        smu8_get_eclk_level(hwmgr,
                                data->vce_dpm.hard_min_clk,
-                               PPSMC_MSG_SetEclkHardMin));
+                               PPSMC_MSG_SetEclkHardMin),
+                       NULL);
        } else {
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_SetEclkHardMin, 0);
+                                       PPSMC_MSG_SetEclkHardMin,
+                                       0,
+                                       NULL);
                /* disable ECLK DPM 0. Otherwise VCE could hang if
                 * switching SCLK from DPM 0 to 6/7 */
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_SetEclkSoftMin, 1);
+                                       PPSMC_MSG_SetEclkSoftMin,
+                                       1,
+                                       NULL);
        }
        return 0;
 }
@@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
-                                                    PPSMC_MSG_VCEPowerOFF);
+                                           PPSMC_MSG_VCEPowerOFF,
+                                           NULL);
        return 0;
 }
 
@@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
-                                                    PPSMC_MSG_VCEPowerON);
+                                           PPSMC_MSG_VCEPowerON,
+                                           NULL);
        return 0;
 }
 
@@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg(
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetDisplaySizePowerParams,
-                                               data);
+                                               data,
+                                               NULL);
        }
 
        return 0;
@@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
        case PP_SCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMin,
-                               mask);
+                               mask,
+                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
-                               mask);
+                               mask,
+                               NULL);
                break;
        default:
                break;
@@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *((uint32_t *)value) = 0;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+               result = smum_send_msg_to_smc(hwmgr,
+                               PPSMC_MSG_GetAverageGraphicsActivity,
+                               &activity_percent);
                if (0 == result) {
-                       activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
                        activity_percent = activity_percent > 100 ? 100 : activity_percent;
                } else {
                        activity_percent = 50;
@@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrHiVirtual,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrLoVirtual,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrHiPhysical,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrLoPhysical,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramBufferSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
                data->dpm_flags |= DPMFlags_UVD_Enabled;
                dpm_features |= UVD_DPM_MASK;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                           PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+                           PPSMC_MSG_EnableAllSmuFeatures,
+                           dpm_features,
+                           NULL);
        } else {
                dpm_features |= UVD_DPM_MASK;
                data->dpm_flags &= ~DPMFlags_UVD_Enabled;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                          PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+                          PPSMC_MSG_DisableAllSmuFeatures,
+                          dpm_features,
+                          NULL);
        }
        return 0;
 }
@@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
                                PPSMC_MSG_SetUvdHardMin,
                                smu8_get_uvd_level(hwmgr,
                                        data->uvd_dpm.hard_min_clk,
-                                       PPSMC_MSG_SetUvdHardMin));
+                                       PPSMC_MSG_SetUvdHardMin),
+                               NULL);
 
                        smu8_enable_disable_uvd_dpm(hwmgr, true);
                } else {
@@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                data->dpm_flags |= DPMFlags_VCE_Enabled;
                dpm_features |= VCE_DPM_MASK;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                           PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+                           PPSMC_MSG_EnableAllSmuFeatures,
+                           dpm_features,
+                           NULL);
        } else {
                dpm_features |= VCE_DPM_MASK;
                data->dpm_flags &= ~DPMFlags_VCE_Enabled;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                          PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+                          PPSMC_MSG_DisableAllSmuFeatures,
+                          dpm_features,
+                          NULL);
        }
 
        return 0;
@@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
                return;
 
        if (bgate)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
        else
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
 }
 
 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
index d09690fca45205894e706bc866fa29ee5ff65640..4279f95ba77918d9bb64e8a2c1c66eb8509be820 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/reboot.h>
 
 #include "hwmgr.h"
 #include "pp_debug.h"
@@ -557,7 +558,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
                if (req_vddc <= vddc_table->entries[i].vddc) {
                        req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_VddC_Request, req_volt);
+                                       PPSMC_MSG_VddC_Request,
+                                       req_volt,
+                                       NULL);
                        return;
                }
        }
@@ -593,37 +596,61 @@ int phm_irq_process(struct amdgpu_device *adev,
        uint32_t src_id = entry->src_id;
 
        if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
-               if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
+               if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
                        pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
                                                PCI_BUS_NUM(adev->pdev->devfn),
                                                PCI_SLOT(adev->pdev->devfn),
                                                PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
+                       orderly_poweroff(true);
+               } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
                        pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
                                        PCI_BUS_NUM(adev->pdev->devfn),
                                        PCI_SLOT(adev->pdev->devfn),
                                        PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
+               else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
                        pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
                                        PCI_BUS_NUM(adev->pdev->devfn),
                                        PCI_SLOT(adev->pdev->devfn),
                                        PCI_FUNC(adev->pdev->devfn));
+                       /*
+                        * HW CTF just occurred. Shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
+                       orderly_poweroff(true);
+               }
        } else if (client_id == SOC15_IH_CLIENTID_THM) {
-               if (src_id == 0)
+               if (src_id == 0) {
                        pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
                                                PCI_BUS_NUM(adev->pdev->devfn),
                                                PCI_SLOT(adev->pdev->devfn),
                                                PCI_FUNC(adev->pdev->devfn));
-               else
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
+                       orderly_poweroff(true);
+               } else
                        pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
                                        PCI_BUS_NUM(adev->pdev->devfn),
                                        PCI_SLOT(adev->pdev->devfn),
                                        PCI_FUNC(adev->pdev->devfn));
-       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
+       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
                pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
                                PCI_BUS_NUM(adev->pdev->devfn),
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
+               /*
+                * HW CTF just occurred. Shutdown to prevent further damage.
+                */
+               dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
+               orderly_poweroff(true);
+       }
 
        return 0;
 }
index d168af4a4d7815f07024bf8609ae71fcdb18b882..46bb16c29cf68f1ac7520ca72cb35e0da52f9cde 100644 (file)
@@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
        if (state == BACO_STATE_IN) {
                if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
                                             ARRAY_SIZE(pre_baco_tbl))) {
-                       if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
+                       if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL))
                                return -EINVAL;
 
                        if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
index f29f95be1e56288d7d8affe84de74f4776d26cff..675c7cab7cfc5992121aaab273e02e9df567a07a 100644 (file)
@@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        if (data->registry_data.vr0hot_enabled)
                data->smu_features[GNLD_VR0HOT].supported = true;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetSmuVersion,
+                       &hwmgr->smu_version);
                /* ACG firmware has major version 5 */
        if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
                data->smu_features[GNLD_ACG].supported = true;
@@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
                data->smu_features[GNLD_PCC_LIMIT].supported = true;
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
                        "Failed to set up led dpm config!",
                        return -EINVAL);
 
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_NumOfDisplays,
+                               0,
+                               NULL);
 
        return 0;
 }
@@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
                                        data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
                        data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
-               agc_btc_response = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
 
                if (1 == agc_btc_response) {
                        if (1 == data->acg_loop_state)
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
                        else if (2 == data->acg_loop_state)
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
                        if (0 == vega10_enable_smc_features(hwmgr, true,
                                data->smu_features[GNLD_ACG].smu_feature_bitmap))
                                        data->smu_features[GNLD_ACG].enabled = true;
@@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
        struct vega10_hwmgr *data = hwmgr->backend;
        AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        serial_number = ((uint64_t)bottom32 << 32) | top32;
 
@@ -2610,14 +2609,16 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
                if (0 != boot_up_values.usVddc) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetFloorSocVoltage,
-                                               (boot_up_values.usVddc * 4));
+                                               (boot_up_values.usVddc * 4),
+                                               NULL);
                        data->vbios_boot_state.bsoc_vddc_lock = true;
                } else {
                        data->vbios_boot_state.bsoc_vddc_lock = false;
                }
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
-                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                               NULL);
        }
 
        result = vega10_populate_avfs_parameters(hwmgr);
@@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 
        if (data->vbios_boot_state.bsoc_vddc_lock) {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_SetFloorSocVoltage, 0);
+                                               PPSMC_MSG_SetFloorSocVoltage, 0,
+                                               NULL);
                data->vbios_boot_state.bsoc_vddc_lock = false;
        }
 
@@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                vega10_enable_disable_PCC_limit_feature(hwmgr, true);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+                       PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
+                       NULL);
 
                tmp_result = vega10_construct_voltage_tables(hwmgr);
                PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                data->dpm_table.gfx_table.dpm_state.soft_min_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinGfxclkByIndex,
-                               data->smc_state_table.gfx_boot_level);
+                               data->smc_state_table.gfx_boot_level,
+                               NULL);
 
                        data->dpm_table.gfx_table.dpm_state.soft_min_level =
                                        data->smc_state_table.gfx_boot_level;
@@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMinSocclkByIndex,
-                                               socclk_idx);
+                                               socclk_idx,
+                                               NULL);
                        } else {
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMinUclkByIndex,
-                                               data->smc_state_table.mem_boot_level);
+                                               data->smc_state_table.mem_boot_level,
+                                               NULL);
                        }
                        data->dpm_table.mem_table.dpm_state.soft_min_level =
                                        data->smc_state_table.mem_boot_level;
@@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                data->dpm_table.soc_table.dpm_state.soft_min_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinSocclkByIndex,
-                               data->smc_state_table.soc_boot_level);
+                               data->smc_state_table.soc_boot_level,
+                               NULL);
                        data->dpm_table.soc_table.dpm_state.soft_min_level =
                                        data->smc_state_table.soc_boot_level;
                }
@@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.gfx_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxGfxclkByIndex,
-                               data->smc_state_table.gfx_max_level);
+                               data->smc_state_table.gfx_max_level,
+                               NULL);
                        data->dpm_table.gfx_table.dpm_state.soft_max_level =
                                        data->smc_state_table.gfx_max_level;
                }
@@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.mem_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSoftMaxUclkByIndex,
-                                       data->smc_state_table.mem_max_level);
+                                       data->smc_state_table.mem_max_level,
+                                       NULL);
                        data->dpm_table.mem_table.dpm_state.soft_max_level =
                                        data->smc_state_table.mem_max_level;
                }
@@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.soc_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxSocclkByIndex,
-                               data->smc_state_table.soc_max_level);
+                               data->smc_state_table.soc_max_level,
+                               NULL);
                        data->dpm_table.soc_table.dpm_state.soft_max_level =
                                        data->smc_state_table.soc_max_level;
                }
@@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
                /* This message will also enable SmcToHost Interrupt */
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetLowGfxclkInterruptThreshold,
-                               (uint32_t)low_sclk_interrupt_threshold);
+                               (uint32_t)low_sclk_interrupt_threshold,
+                               NULL);
        }
 
        return 0;
@@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
        if (!query)
                return -EINVAL;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
-       value = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
 
        /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
        *query = value << 8;
@@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
-               sclk_mhz = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
                *((uint32_t *)value) = sclk_mhz * 100;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
-               mclk_idx = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
                if (mclk_idx < dpm_table->mem_table.count) {
                        *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
                        *size = 4;
@@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                }
                break;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
-               activity_percent = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
+                                               &activity_percent);
                *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
                *size = 4;
                break;
@@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
-               *((uint32_t *)value) = smum_get_argument(hwmgr) *
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
+               *((uint32_t *)value) = *((uint32_t *)value) *
                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MEM_TEMP:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
-               *((uint32_t *)value) = smum_get_argument(hwmgr) *
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
+               *((uint32_t *)value) = *((uint32_t *)value) *
                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
                *size = 4;
                break;
@@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
+                       has_disp ? 1 : 0,
+                       NULL);
 }
 
 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                clk_request = (clk_freq << 16) | clk_select;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_RequestDisplayClockByFreq,
-                               clk_request);
+                               clk_request,
+                               NULL);
        }
 
        return result;
@@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
                if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
                        smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR / 100);
+                                       min_clocks.dcefClockInSR / 100,
+                                       NULL);
                } else {
                        pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
                }
@@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
 
        if (min_clocks.memoryClock != 0) {
                idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
+                                               NULL);
                data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
        }
 
@@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.sclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
 
                if (hwmgr->pp_one_vf &&
                    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.mclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
 
                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.socclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
 
                for (i = 0; i < soc_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        break;
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
-               now = smum_get_argument(hwmgr);
+                               PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
 
                for (i = 0; i < dcef_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        "*" : "");
                break;
        case PP_PCIE:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
 
                for (i = 0; i < pcie_table->count; i++)
                        size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 
        if (data->water_marks_bitmap & WaterMarksLoaded) {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+                       NULL);
        }
 
        return result;
@@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetCustomGfxDpmParameters,
                                        busy_set_point | FPS<<8 |
-                                       use_rlc_busy << 16 | min_active_level<<24);
+                                       use_rlc_busy << 16 | min_active_level<<24,
+                                       NULL);
        }
 
 out:
        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
-                                               1 << power_profile_mode);
+                                               1 << power_profile_mode,
+                                               NULL);
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
@@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
index 0a677d4bc87b9a4621cef0edae6785929e03e63d..9757d47dd6b815bfe208676b435551b830fce5ff 100644 (file)
@@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg   PSMSEEDCStallDelayConfig_Vega10[] =
        {   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg   PSMSEEDCThresholdConfig_Vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                 Shift                                                  Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       /* SQ EDC THRESHOLD */
-       {   ixDIDT_SQ_EDC_THRESHOLD,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,            0x0000 },
-
-       {   0xFFFFFFFF  }  /* End of list */
-};
-
 static const struct vega10_didt_config_reg   PSMSEEDCCtrlResetConfig_Vega10[] =
 {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg   PSMSEEDCCtrlConfig_Vega10[] =
        {   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg   PSMGCEDCThresholdConfig_vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                 Shift                                                  Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   mmGC_EDC_THRESHOLD,                GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK,                GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,                 0x0000000 },
-
-       {   0xFFFFFFFF  }  /* End of list */
-};
-
 static const struct vega10_didt_config_reg   PSMGCEDCDroopCtrlConfig_vega10[] =
 {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
 
        /* For Vega10, SMC does not support any mask yet. */
        if (enable)
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info,
+                                               NULL);
 
 }
 
@@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 
        if (data->registry_data.enable_pkg_pwr_tracking_feature)
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetPptLimit, n);
+                               PPSMC_MSG_SetPptLimit, n,
+                               NULL);
 
        return 0;
 }
@@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
index ba8763daa3808b2a6fcd57f7ff6315d6bc8c3e5e..7783c7fd7ccb0a58e906d0440f25eab9e2612315 100644 (file)
@@ -31,8 +31,7 @@
 
 static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 {
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
-       *current_rpm = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm);
        return 0;
 }
 
@@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        table->FanPwmMin = hwmgr->thermal_controller.
                        advanceFanControlParameters.usPWMMin * 255 / 100;
index 9d8ca94a8f0c1ef9b16bd9b41d889e54ae971683..bc53cce4f32d3995ba132d4b47079bd200282f9d 100644 (file)
@@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
        if (state == BACO_STATE_IN) {
                if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
                                             ARRAY_SIZE(pre_baco_tbl))) {
-                       if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+                       if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL))
                                return -EINVAL;
 
                        if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
index aca61d1ff3c2a692b8d7bc7bed17b80e35154cf5..f4d1692cccf3a388b013b463f676941b1ee42902 100644 (file)
@@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        }
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | 0xFF));
+                       (clk_id << 16 | 0xFF),
+                       num_of_levels);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetNumOfDpmLevel] failed to get dpm levels!",
                        return ret);
 
-       *num_of_levels = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
-                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
         *Lower 16 bits specify the level
         */
        PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
+               clock) == 0,
                "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
                return -EINVAL);
 
-       *clock = smum_get_argument(hwmgr);
-
        return 0;
 }
 
@@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
                data->vbios_boot_state.vclock = boot_up_values.ulVClk;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
-                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                               NULL);
        }
 
        memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
        uint32_t result;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
                "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
                return -EINVAL);
 
-       result = smum_get_argument(hwmgr);
        PP_ASSERT_WITH_CODE(result == 1,
                        "Failed to run ACG BTC!", return -EINVAL);
 
@@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
                                (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
+                       NULL) == 0,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
                return -1);
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
+                       NULL) == 0,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
                return -1);
 
@@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        bool enabled;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
                "[EnableAllSMUFeatures] Failed to enable all smu features!",
                return -1);
 
@@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
        bool enabled;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
                "[DisableAllSMUFeatures] Failed to disable all smu features!",
                return -1);
 
@@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
@@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
 {
        /* AC Max */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
+                       &(clock->ACMax)) == 0,
                "[GetClockRanges] Failed to get max ac clock from SMC!",
                return -EINVAL);
-       clock->ACMax = smum_get_argument(hwmgr);
 
        /* AC Min */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
+                       &(clock->ACMin)) == 0,
                "[GetClockRanges] Failed to get min ac clock from SMC!",
                return -EINVAL);
-       clock->ACMin = smum_get_argument(hwmgr);
 
        /* DC Max */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
+                       &(clock->DCMax)) == 0,
                "[GetClockRanges] Failed to get max dc clock from SMC!",
                return -EINVAL);
-       clock->DCMax = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        int tmp_result, result = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        result = vega12_set_allowed_featuresmask(hwmgr);
        PP_ASSERT_WITH_CODE(result == 0,
@@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min gfxclk !",
                                        return ret);
        }
@@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min memclk !",
                                        return ret);
 
                min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min memclk !",
                                        return ret);
        }
@@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min vclk!",
                                        return ret);
 
@@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min dclk!",
                                        return ret);
        }
@@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min eclk!",
                                        return ret);
        }
@@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min socclk!",
                                        return ret);
        }
@@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min dcefclk!",
                                        return ret);
        }
@@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max gfxclk!",
                                        return ret);
        }
@@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max memclk!",
                                        return ret);
        }
@@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max vclk!",
                                        return ret);
 
                max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max dclk!",
                                        return ret);
        }
@@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max eclk!",
                                        return ret);
        }
@@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max socclk!",
                                        return ret);
        }
@@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
        *gfx_freq = 0;
 
        PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
+                       &gfx_clk) == 0,
                        "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
                        return -EINVAL);
-       gfx_clk = smum_get_argument(hwmgr);
 
        *gfx_freq = gfx_clk * 100;
 
@@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
        *mclk_freq = 0;
 
        PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
+                               &mem_clk) == 0,
                        "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
                        return -EINVAL);
-       mem_clk = smum_get_argument(hwmgr);
 
        *mclk_freq = mem_clk * 100;
 
@@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
        if (data->smu_features[GNLD_DPM_UCLK].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
+                       has_disp ? 1 : 0,
+                       NULL);
 
        return 0;
 }
@@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                        clk_request = (clk_select << 16) | clk_freq;
                        result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinByFreq,
-                                       clk_request);
+                                       clk_request,
+                                       NULL);
                }
        }
 
@@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
                                PP_ASSERT_WITH_CODE(
                                        !smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR /100),
+                                       min_clocks.dcefClockInSR /100,
+                                       NULL),
                                        "Attempt to set divider for DCEFCLK Failed!",
                                        return -1);
                } else {
@@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        case PP_SOCCLK:
                PP_ASSERT_WITH_CODE(
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
+                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
+                                       &now) == 0,
                                "Attempt to get Current SOCCLK Frequency Failed!",
                                return -EINVAL);
-               now = smum_get_argument(hwmgr);
 
                PP_ASSERT_WITH_CODE(
                                vega12_get_socclocks(hwmgr, &clocks) == 0,
@@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        case PP_DCEFCLK:
                PP_ASSERT_WITH_CODE(
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
+                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
+                                       &now) == 0,
                                "Attempt to get Current DCEFCLK Frequency Failed!",
                                return -EINVAL);
-               now = smum_get_argument(hwmgr);
 
                PP_ASSERT_WITH_CODE(
                                vega12_get_dcefclocks(hwmgr, &clocks) == 0,
@@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
                                return ret);
        }
@@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0,
+                       NULL);
 
        ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
                        &data->dpm_table.mem_table);
@@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
                data->smu_features[GNLD_DPM_DCEFCLK].supported &&
                data->smu_features[GNLD_DPM_SOCCLK].supported)
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+                       NULL);
 
        return result;
 }
@@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (data->gfxoff_controlled_by_driver)
-               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
 
        return ret;
 }
@@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (data->gfxoff_controlled_by_driver)
-               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
 
        return ret;
 }
@@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
index 904eb2c9155b4b45ff21daee35eed31146cdd79f..c85806a6f62e3f6dcb185a9e6e6d38bbd9bf646e 100644 (file)
 static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 {
        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GetCurrentRpm),
+                               PPSMC_MSG_GetCurrentRpm,
+                               current_rpm),
                        "Attempt to get current RPM from SMC Failed!",
                        return -EINVAL);
-       *current_rpm = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        return ret;
 }
index 9b5e72bdceca5ad1c8dd10285128bc00d811af31..2a28c9df15a02070eddaf764b5e526214df1db1f 100644 (file)
@@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                        WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
 
                        if(smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_EnterBaco, 0))
+                                       PPSMC_MSG_EnterBaco, 0, NULL))
                                return -EINVAL;
                } else {
                        if(smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_EnterBaco, 1))
+                                       PPSMC_MSG_EnterBaco, 1, NULL))
                                return -EINVAL;
                }
 
        } else if (state == BACO_STATE_OUT) {
-               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
+               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL))
                        return -EINVAL;
                if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
                                                     ARRAY_SIZE(clean_baco_tbl)))
@@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
        if (ret)
                return ret;
 
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL);
 }
index 08b6ba39a6d7c3cdaa088486c7e26b17488f1683..9ff470f1b826cd570339eba862827b6c413bc659 100644 (file)
@@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         */
        data->registry_data.disallowed_features = 0xE0041C00;
        /* ECC feature should be disabled on old SMUs */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
        if (hwmgr->smu_version < 0x282100)
                data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
 
@@ -400,10 +399,8 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        }
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | 0xFF));
+                       (clk_id << 16 | 0xFF),
+                       num_of_levels);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetNumOfDpmLevel] failed to get dpm levels!",
                        return ret);
 
-       *num_of_levels = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
-                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | index));
+                       (clk_id << 16 | index),
+                       clk);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetDpmFreqByIndex] failed to get dpm freq by index!",
                        return ret);
 
-       *clk = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*clk,
-                       "[GetDpmFreqByIndex] clk value is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetMinDeepSleepDcefclk,
-               (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+               (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                       NULL);
 
        memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
 
@@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
         */
        smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
+                       PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                       NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[OverridePcieParameters] Attempt to override pcie params failed!",
                return ret);
@@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
                                  & 0xFFFFFFFF));
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
+               PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
                return ret);
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
+               PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
                return ret);
@@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 
 static int vega20_run_btc(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
 }
 
 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
 }
 
 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_EnableAllSmuFeatures)) == 0,
+                       PPSMC_MSG_EnableAllSmuFeatures,
+                       NULL)) == 0,
                        "[EnableAllSMUFeatures] Failed to enable all smu features!",
                        return ret);
 
@@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
        if (data->smu_features[GNLD_DPM_UCLK].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       1);
+                       1,
+                       NULL);
 
        return 0;
 }
@@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetFclkGfxClkRatio,
-                       data->registry_data.fclk_gfxclk_ratio);
+                       data->registry_data.fclk_gfxclk_ratio,
+                       NULL);
 }
 
 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+                       PPSMC_MSG_DisableAllSmuFeatures,
+                       NULL)) == 0,
                        "[DisableAllSMUFeatures] Failed to disable all smu features!",
                        return ret);
 
@@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage(
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetAVFSVoltageByDpm,
-                       ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+                       ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+                       voltage);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
                        return ret);
 
-       *voltage = smum_get_argument(hwmgr);
        *voltage = *voltage / VOLTAGE_SCALE;
 
        return 0;
@@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDcModeMaxDpmFreq,
-                       (clock_select << 16))) == 0,
+                       (clock_select << 16),
+                       clock)) == 0,
                        "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
                        return ret);
-       *clock = smum_get_argument(hwmgr);
 
        /* if DC limit is zero, return AC limit */
        if (*clock == 0) {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetMaxDpmFreq,
-                       (clock_select << 16))) == 0,
+                       (clock_select << 16),
+                       clock)) == 0,
                        "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
                        return ret);
-               *clock = smum_get_argument(hwmgr);
        }
 
        return 0;
@@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
        int result;
 
        result = smum_send_msg_to_smc(hwmgr,
-               PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+               PPSMC_MSG_SetMGpuFanBoostLimitRpm,
+               NULL);
        PP_ASSERT_WITH_CODE(!result,
                        "[EnableMgpuFan] Failed to enable mgpu fan boost!",
                        return result);
@@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        int result = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        result = vega20_set_allowed_featuresmask(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
@@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        return result);
 
        result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
-                       POWER_SOURCE_AC << 16);
+                       POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
        PP_ASSERT_WITH_CODE(!result,
                        "[GetPptLimit] get default PPT limit failed!",
                        return result);
        hwmgr->power_limit =
-               hwmgr->default_power_limit = smum_get_argument(hwmgr);
+               hwmgr->default_power_limit;
 
        return 0;
 }
@@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min gfxclk !",
                                        return ret);
        }
@@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min memclk !",
                                        return ret);
        }
@@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min vclk!",
                                        return ret);
 
@@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min dclk!",
                                        return ret);
        }
@@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min eclk!",
                                        return ret);
        }
@@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min socclk!",
                                        return ret);
        }
@@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_FCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min fclk!",
                                        return ret);
        }
@@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min dcefclk!",
                                        return ret);
        }
@@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max gfxclk!",
                                        return ret);
        }
@@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max memclk!",
                                        return ret);
        }
@@ -1922,14 +1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max vclk!",
                                        return ret);
 
                max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max dclk!",
                                        return ret);
        }
@@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max eclk!",
                                        return ret);
        }
@@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max socclk!",
                                        return ret);
        }
@@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_FCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max fclk!",
                                        return ret);
        }
@@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
 
        if (max) {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+                               PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
+                               clock)) == 0,
                                "[GetClockRanges] Failed to get max clock from SMC!",
                                return ret);
-               *clock = smum_get_argument(hwmgr);
        } else {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_GetMinDpmFreq,
-                               (clock_select << 16))) == 0,
+                               (clock_select << 16),
+                               clock)) == 0,
                                "[GetClockRanges] Failed to get min clock from SMC!",
                                return ret);
-               *clock = smum_get_argument(hwmgr);
        }
 
        return 0;
@@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
        *clk_freq = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+                       PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
+                       clk_freq)) == 0,
                        "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
                        return ret);
-       *clk_freq = smum_get_argument(hwmgr);
 
        *clk_freq = *clk_freq * 100;
 
@@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                        clk_request = (clk_select << 16) | clk_freq;
                        result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinByFreq,
-                                       clk_request);
+                                       clk_request,
+                                       NULL);
                }
        }
 
@@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
                        if (data->smu_features[GNLD_DS_DCEFCLK].supported)
                                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR / 100)) == 0,
+                                       min_clocks.dcefClockInSR / 100,
+                                       NULL)) == 0,
                                        "Attempt to set divider for DCEFCLK Failed!",
                                        return ret);
                } else {
@@ -2324,7 +2337,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
                dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetHardMinFreq] Set hard min uclk failed!",
                                return ret);
        }
@@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                        return -EINVAL;
 
                ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+                       PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
+                       NULL);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to set min link dpm level!",
                        return ret);
@@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
@@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
                                return ret);
        }
@@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinByFreq,
-                               (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+                               (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+                               NULL)),
                                "[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
                                return ret);
        }
@@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
                        &data->dpm_table.mem_table);
@@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
            data->smu_features[GNLD_DPM_SOCCLK].supported) {
                result = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_NumOfDisplays,
-                       hwmgr->display_config->num_display);
+                       hwmgr->display_config->num_display,
+                       NULL);
        }
 
        return result;
@@ -4082,7 +4100,8 @@ out:
        workload_type =
                conv_power_profile_to_pplib_workload(power_profile_mode);
        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
-                                               1 << workload_type);
+                                               1 << workload_type,
+                                               NULL);
 
        hwmgr->power_profile_mode = power_profile_mode;
 
@@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
                                                  (acquire ?
                                                  PPSMC_MSG_RequestI2CBus :
                                                  PPSMC_MSG_ReleaseI2CBus),
-                                                 0);
+                                                 0,
+                                                 NULL);
 
        PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
        return res;
@@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
                return -EINVAL;
        }
 
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
+                               NULL);
        if (ret)
                pr_err("SetDfCstate failed!\n");
 
@@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                                  PPSMC_MSG_SetXgmiMode,
-                                                 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+                                                 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+                                                 NULL);
        if (ret)
                pr_err("SetXgmiPstate failed!\n");
 
index a0bfb65cc5d6bb3659b3dc60f197936ece5ca03b..d7cc3d2d9e17d2425d90b0756008189dec63190b 100644 (file)
@@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 
        if (data->smu_features[GNLD_PPT].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetPptLimit, n);
+                               PPSMC_MSG_SetPptLimit, n,
+                               NULL);
 
        return 0;
 }
@@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
index ede54e87e287b5c838677e0739e60aa246aa7e65..7add2f60f49c4094ffac2603858e8a9d3b8712dd 100644 (file)
@@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GetCurrentRpm)) == 0,
+                               PPSMC_MSG_GetCurrentRpm,
+                               current_rpm)) == 0,
                        "Attempt to get current RPM from SMC Failed!",
                        return ret);
-       *current_rpm = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        return ret;
 }
index ae2c318dd6fac4d8b6fb5de077d6f5d7b33778dd..4d1c2a44a8b6e927ed1f5757e01acd52fb8c6947 100644 (file)
@@ -405,7 +405,9 @@ struct smu_context
        bool pm_enabled;
        bool is_apu;
 
-       uint32_t smc_if_version;
+       uint32_t smc_driver_if_version;
+       uint32_t smc_fw_if_version;
+       uint32_t smc_fw_version;
 
        bool uploading_custom_pp_table;
        bool dc_controlled_by_gpio;
@@ -489,6 +491,7 @@ struct pptable_funcs {
        int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
                                   uint32_t dpm_level, uint32_t *freq);
        int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+       int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
        int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
        int (*i2c_eeprom_init)(struct i2c_adapter *control);
        void (*i2c_eeprom_fini)(struct i2c_adapter *control);
@@ -580,11 +583,6 @@ int smu_check_fw_status(struct smu_context *smu);
 
 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
 
-#define smu_i2c_eeprom_init(smu, control) \
-               ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
-#define smu_i2c_eeprom_fini(smu, control) \
-               ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
-
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
 
 int smu_get_power_limit(struct smu_context *smu,
@@ -734,6 +732,7 @@ int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state);
 int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks);
index f736d773f9d629ad8e0bc4833ec95f20f1bdd70d..e07478b6ac04da590b0949cc599e4bec91a02eaa 100644 (file)
 #define PPSMC_MSG_SetNumBadHbmPagesRetired      0x3A
 
 #define PPSMC_MSG_DFCstateControl               0x3B
-#define PPSMC_Message_Count                     0x3C
+#define PPSMC_MSG_GmiPwrDnControl                0x3D
+#define PPSMC_Message_Count                      0x3E
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
index 2ffb666b97e6d609f99a0029d23f4dc9c0aa0ff7..15ed6cbdf36604ac893f0d72a7579915bed27974 100644 (file)
@@ -743,6 +743,7 @@ struct pp_hwmgr {
        bool pm_en;
        bool pp_one_vf;
        struct mutex smu_lock;
+       struct mutex msg_lock;
 
        uint32_t pp_table_version;
        void *device;
index ce5b5011c122ac86f82c3d386bab98e5e9a11921..8b82059d97e77ffadbbbecc746e691840321112e 100644 (file)
@@ -82,8 +82,8 @@
 // Other
 #define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
 #define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+#define FEATURE_PER_PART_VMIN_BIT       26
 
-#define FEATURE_SPARE_26_BIT            26
 #define FEATURE_SPARE_27_BIT            27
 #define FEATURE_SPARE_28_BIT            28
 #define FEATURE_SPARE_29_BIT            29
 
 #define FEATURE_OUT_OF_BAND_MONITOR_MASK  (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT   )
 #define FEATURE_TEMP_DEPENDENT_VMIN_MASK  (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+#define FEATURE_PER_PART_VMIN_MASK        (1 << FEATURE_PER_PART_VMIN_BIT        )
 
 
 //FIXME need updating
@@ -628,8 +629,14 @@ typedef struct {
   uint16_t BasePerformanceFrequencyCap;   //In Mhz
   uint16_t MaxPerformanceFrequencyCap;    //In Mhz
 
+  // Per-Part Vmin
+  uint16_t VDDGFX_VminLow;        // mv Q2
+  uint16_t VDDGFX_TVminLow;       //Celcius
+  uint16_t VDDGFX_VminLow_HiTemp; // mv Q2
+  uint16_t VDDGFX_VminLow_LoTemp; // mv Q2
+
   // SECTION: Reserved
-  uint32_t     Reserved[9];
+  uint32_t     Reserved[7];
 
   // SECTION: BOARD PARAMETERS
 
@@ -869,6 +876,10 @@ typedef struct {
   uint8_t   Mem_DownHystLimit;
   uint16_t  Mem_Fps;
 
+  uint32_t  BusyThreshold;                  // Q16
+  uint32_t  BusyHyst;
+  uint32_t  IdleHyst;
+
   uint32_t  MmHubPadding[8]; // SMU internal use
 } DpmActivityMonitorCoeffInt_t;
 
index 2f85a34c0591ad87585f0bdd6105ba94eef9a740..e9315eb5b48e1116c46b5359cab43b17457bce74 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 11
+#define SMU12_DRIVER_IF_VERSION 14
 
 typedef struct {
   int32_t value;
@@ -154,15 +154,19 @@ typedef enum {
 } CLOCK_IDs_e;
 
 // Throttler Status Bitmask
-#define THROTTLER_STATUS_BIT_SPL        0
-#define THROTTLER_STATUS_BIT_FPPT       1
-#define THROTTLER_STATUS_BIT_SPPT       2
-#define THROTTLER_STATUS_BIT_SPPT_APU   3
-#define THROTTLER_STATUS_BIT_THM_CORE   4
-#define THROTTLER_STATUS_BIT_THM_GFX    5
-#define THROTTLER_STATUS_BIT_THM_SOC    6
-#define THROTTLER_STATUS_BIT_TDC_VDD    7
-#define THROTTLER_STATUS_BIT_TDC_SOC    8
+#define THROTTLER_STATUS_BIT_SPL            0
+#define THROTTLER_STATUS_BIT_FPPT           1
+#define THROTTLER_STATUS_BIT_SPPT           2
+#define THROTTLER_STATUS_BIT_SPPT_APU       3
+#define THROTTLER_STATUS_BIT_THM_CORE       4
+#define THROTTLER_STATUS_BIT_THM_GFX        5
+#define THROTTLER_STATUS_BIT_THM_SOC        6
+#define THROTTLER_STATUS_BIT_TDC_VDD        7
+#define THROTTLER_STATUS_BIT_TDC_SOC        8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU    9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX   10
+#define THROTTLER_STATUS_BIT_EDC_CPU       11
+#define THROTTLER_STATUS_BIT_EDC_GFX       12
 
 typedef struct {
   uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
@@ -180,7 +184,7 @@ typedef struct {
   uint16_t Power[2];                    //[mW] indices: VDDCR_VDD, VDDCR_SOC
 
   uint16_t FanPwm;                      //[milli]
-  uint16_t CurrentSocketPower;          //[mW]
+  uint16_t CurrentSocketPower;          //[W]
 
   uint16_t CoreFrequency[8];            //[MHz]
   uint16_t CorePower[8];                //[mW]
@@ -193,10 +197,16 @@ typedef struct {
   uint16_t ThrottlerStatus;
   uint16_t spare;
 
-  uint16_t StapmOriginalLimit;          //[mW]
-  uint16_t StapmCurrentLimit;           //[mW]
-  uint16_t ApuPower;              //[mW]
-  uint16_t dGpuPower;               //[mW]
+  uint16_t StapmOriginalLimit;          //[W]
+  uint16_t StapmCurrentLimit;           //[W]
+  uint16_t ApuPower;                    //[W]
+  uint16_t dGpuPower;                   //[W]
+
+  uint16_t VddTdcValue;                 //[mA]
+  uint16_t SocTdcValue;                 //[mA]
+  uint16_t VddEdcValue;                 //[mA]
+  uint16_t SocEdcValue;                 //[mA]
+  uint16_t reserve[2];
 } SmuMetrics_t;
 
 
index a5b4df1467130f47432ee6e3a15f503b3f9d2fc7..ee7dac4693d4144c645010d44db1f5457ad17586 100644 (file)
        __SMU_DUMMY_MAP(SetSoftMinJpeg),              \
        __SMU_DUMMY_MAP(SetHardMinFclkByFreq),        \
        __SMU_DUMMY_MAP(DFCstateControl), \
+       __SMU_DUMMY_MAP(GmiPwrDnControl), \
        __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
        __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
 
index 674e426ed59bbe1d056b2371fbf5777492604f27..6b3b451a80188a588a0912e87be6e60d60b209c1 100644 (file)
@@ -27,8 +27,8 @@
 
 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x14
+#define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
 
@@ -37,7 +37,6 @@
 #define MP0_SRAM                       0x03900000
 #define MP1_Public                     0x03b00000
 #define MP1_SRAM                       0x03c00004
-#define MP1_SMC_SIZE           0x40000
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS          0x3010024
index c5288831aa15c29b244203b5604ee734fc5e6a0e..ad100b533d0496c8f4f4c5be28c3ced01692208b 100644 (file)
@@ -81,16 +81,15 @@ enum SMU10_TABLE_ID {
        SMU10_CLOCKTABLE,
 };
 
-extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
-
 extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
 
 extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
 
-extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp);
 
 extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                       uint16_t msg, uint32_t parameter);
+                                       uint16_t msg, uint32_t parameter,
+                                       uint32_t *resp);
 
 extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
 
index 15030284b4446f4d75d67f4d17f24d1f246d7e09..0c9be864d072e7e06fb5e5e2d0d05ee56beaaacf 100644 (file)
@@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
        struct smu_table_context *table_context = &smu->smu_table;
        PPTable_t *smc_pptable = table_context->driver_pptable;
        struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
+       struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
        int index, ret;
 
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
        if (ret)
                return ret;
 
-       memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
-              sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
-
-       /* SVI2 Board Parameters */
-       smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
-       smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
-       smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
-       smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
-       smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
-       smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
-       smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
-       smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
-       smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
-       smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
-
-       /* Telemetry Settings */
-       smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
-       smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
-       smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
-       smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
-       smc_pptable->SocOffset = smc_dpm_table->SocOffset;
-       smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
-       smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
-       smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
-       smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
-       smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
-       smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
-       smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
-
-       /* GPIO Settings */
-       smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
-       smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
-       smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
-       smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
-       smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
-       smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
-       smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
-       smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
-
-       /* LED Display Settings */
-       smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
-       smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
-       smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
-       smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
-
-       /* GFXCLK PLL Spread Spectrum */
-       smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
-       smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
-       smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
-
-       /* GFXCLK DFLL Spread Spectrum */
-       smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
-       smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
-       smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
-
-       /* UCLK Spread Spectrum */
-       smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
-       smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
-       smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
-
-       /* SOCCLK Spread Spectrum */
-       smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
-       smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
-       smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
-
-       /* Total board power */
-       smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
-       smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
-
-       /* Mvdd Svi2 Div Ratio Setting */
-       smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
+       pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+                       smc_dpm_table->table_header.format_revision,
+                       smc_dpm_table->table_header.content_revision);
+
+       if (smc_dpm_table->table_header.format_revision != 4) {
+               pr_err("smc_dpm_info table format revision is not 4!\n");
+               return -EINVAL;
+       }
+
+       switch (smc_dpm_table->table_header.content_revision) {
+       case 5: /* nv10 and nv14 */
+               memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
+                       sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+               break;
+       case 7: /* nv12 */
+               ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+                                             (uint8_t **)&smc_dpm_table_v4_7);
+               if (ret)
+                       return ret;
+               memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
+                       sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+               break;
+       default:
+               pr_err("smc_dpm_info with unsupported content revision %d!\n",
+                               smc_dpm_table->table_header.content_revision);
+               return -EINVAL;
+       }
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
                /* TODO: remove it once SMU fw fix it */
@@ -1336,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
-               if (size < 0)
-                       return -EINVAL;
 
                ret = smu_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1860,7 +1815,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
        int power_src;
 
        if (!smu->power_limit) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+               if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+                       !amdgpu_sriov_vf(smu->adev)) {
                        power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
                        if (power_src < 0)
                                return -EINVAL;
@@ -2003,6 +1959,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
        OverDriveTable_t *od_table, *boot_od_table;
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
        if (ret)
                return ret;
index ff73a735b8882a4844ac87dda3ca75588e6fe453..67476047c067d1d0f99b7ecba83ca2a00ed90dca 100644 (file)
@@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 
        for (i = 0; i < count; i++) {
                GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+               if (!value)
+                       continue;
                size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
                                cur_value == value ? "*" : "");
                if (cur_value == value)
@@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
        uint32_t i, size = 0;
        int16_t workload_type = 0;
 
-       if (!smu->pm_enabled || !buf)
+       if (!buf)
                return -EINVAL;
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -895,12 +897,17 @@ static int renoir_read_sensor(struct smu_context *smu,
 
 static bool renoir_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
+
        /*
-        * Util now, the pmfw hasn't exported the interface of SMU
+        * Until now, the pmfw hasn't exported the interface of SMU
         * feature mask to APU SKU so just force on all the feature
         * at early initial stage.
         */
-       return true;
+       if (adev->in_suspend)
+               return false;
+       else
+               return true;
 
 }
 
@@ -950,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 void renoir_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &renoir_ppt_funcs;
-       smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+       smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
        smu->is_apu = true;
 }
index 40c35bcc5a0a2c9a358e6773375e156c7a5bc01c..c97444841abcb40e6dd544fe87affc383391e2b0 100644 (file)
@@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
 #define smu_set_power_source(smu, power_src) \
        ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
 
+#define smu_i2c_eeprom_init(smu, control) \
+               ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0)
+#define smu_i2c_eeprom_fini(smu, control) \
+               ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0)
+
 #endif
index 541c932a60051bb9c00215ca22e7924892c1d355..3391d1c4bbbedf3e8ec1cc7d4410d9eea6c0062f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/reboot.h>
 
 #define SMU_11_0_PARTIAL_PPTABLE
 
@@ -57,7 +58,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
 {
        struct amdgpu_device *adev = smu->adev;
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
        return 0;
 }
 
@@ -65,7 +66,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+       *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
        return 0;
 }
 
@@ -75,7 +76,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
 
        for (i = 0; i < timeout; i++) {
-               cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+               cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;
 
@@ -83,7 +84,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        }
 
        /* timeout means wrong logic */
-       return -ETIME;
+       if (i == timeout)
+               return -ETIME;
+
+       return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
 }
 
 int
@@ -107,9 +111,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                goto out;
        }
 
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
 
        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 
@@ -119,6 +123,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                       smu_get_message_name(smu, msg), index, param, ret);
                goto out;
        }
+
        if (read_arg) {
                ret = smu_v11_0_read_arg(smu, read_arg);
                if (ret) {
@@ -201,13 +206,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu)
        const struct smc_firmware_header_v1_0 *hdr;
        uint32_t addr_start = MP1_SRAM;
        uint32_t i;
+       uint32_t smc_fw_size;
        uint32_t mp1_fw_flags;
 
        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        src = (const uint32_t *)(adev->pm.fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       smc_fw_size = hdr->header.ucode_size_bytes;
 
-       for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+       for (i = 1; i < smc_fw_size/4 - 1; i++) {
                WREG32_PCIE(addr_start, src[i]);
                addr_start += 4;
        }
@@ -264,23 +271,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
 
        switch (smu->adev->asic_type) {
        case CHIP_VEGA20:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
                break;
        case CHIP_ARCTURUS:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
                break;
        case CHIP_NAVI10:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
                break;
        case CHIP_NAVI12:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
                break;
        case CHIP_NAVI14:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
                break;
        default:
                pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
                break;
        }
 
@@ -292,10 +299,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
         * Considering above, we just leave user a warning message instead
         * of halt driver loading.
         */
-       if (if_version != smu->smc_if_version) {
+       if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                       smu->smc_if_version, if_version,
+                       smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }
@@ -479,8 +486,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;
 
@@ -497,8 +502,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;
 
@@ -730,8 +733,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
        struct smu_table_context *table_context = &smu->smu_table;
        struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
 
+       /* during TDR we need to free and alloc the pptable */
        if (table_context->driver_pptable)
-               return -EINVAL;
+               kfree(table_context->driver_pptable);
 
        table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
 
@@ -771,6 +775,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 {
        int ret;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
        if (ret)
@@ -783,8 +790,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 {
        struct smu_table_context *table_context = &smu->smu_table;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (!table_context)
                return -EINVAL;
 
@@ -816,6 +821,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
        int ret = 0;
        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (tool_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrHigh,
@@ -835,6 +843,9 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 {
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (!smu->pm_enabled)
                return ret;
 
@@ -849,6 +860,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        mutex_lock(&feature->mutex);
        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
                goto failed;
@@ -877,6 +891,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+               return 0;
+
        if (!feature_mask || num < 2)
                return -EINVAL;
 
@@ -932,8 +949,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 {
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (!smu->pm_enabled)
                return ret;
+
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -948,9 +969,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
        int ret = 0;
        int clk_id;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
            (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
                return 0;
@@ -1096,6 +1114,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
        int ret = 0;
        uint32_t max_power_limit;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        max_power_limit = smu_v11_0_get_max_power_limit(smu);
 
        if (n > max_power_limit) {
@@ -1205,9 +1226,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
        struct smu_temperature_range range;
        struct amdgpu_device *adev = smu->adev;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
 
        ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1321,9 +1339,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
        enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
 
-       if (!smu->pm_enabled)
-               return -EINVAL;
-
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
                smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
@@ -1533,12 +1548,19 @@ static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
 #define THM_11_0__SRCID__THM_DIG_THERM_L2H             0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
 #define THM_11_0__SRCID__THM_DIG_THERM_H2L             1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
 
+#define SMUIO_11_0__SRCID__SMUIO_GPIO19                        83
+
 static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        uint32_t client_id = entry->client_id;
        uint32_t src_id = entry->src_id;
+       /*
+        * ctxid is used to distinguish different
+        * events for SMCToHost interrupt.
+        */
+       uint32_t ctxid = entry->src_data[0];
 
        if (client_id == SOC15_IH_CLIENTID_THM) {
                switch (src_id) {
@@ -1547,6 +1569,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                PCI_BUS_NUM(adev->pdev->devfn),
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
+                       orderly_poweroff(true);
                break;
                case THM_11_0__SRCID__THM_DIG_THERM_H2L:
                        pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
@@ -1561,11 +1589,30 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
                break;
-
                }
+       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+               pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
+                               PCI_BUS_NUM(adev->pdev->devfn),
+                               PCI_SLOT(adev->pdev->devfn),
+                               PCI_FUNC(adev->pdev->devfn));
+               /*
+                * HW CTF just occurred. Shutdown to prevent further damage.
+                */
+               dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
+               orderly_poweroff(true);
        } else if (client_id == SOC15_IH_CLIENTID_MP1) {
-               if (src_id == 0xfe)
-                       smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+               if (src_id == 0xfe) {
+                       switch (ctxid) {
+                       case 0x3:
+                               dev_dbg(adev->dev, "Switched to AC mode!\n");
+                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               break;
+                       case 0x4:
+                               dev_dbg(adev->dev, "Switched to DC mode!\n");
+                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               break;
+                       }
+               }
        }
 
        return 0;
@@ -1605,6 +1652,13 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
        if (ret)
                return ret;
 
+       /* Register CTF(GPIO_19) interrupt */
+       ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
+                               SMUIO_11_0__SRCID__SMUIO_GPIO19,
+                               irq_src);
+       if (ret)
+               return ret;
+
        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
                                0xfe,
                                irq_src);
@@ -1718,6 +1772,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                if (ret)
                        goto out;
 
+               if (ras && ras->supported) {
+                       ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+                       if (ret)
+                               goto out;
+               }
+
                /* clear vbios scratch 6 and 7 for coming asic reinit */
                WREG32(adev->bios_scratch_reg_offset + 6, 0);
                WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@ -1827,6 +1887,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
index 169ebdad87b87be62a8d58d754bee8a29565b7ee..4023d10fb49bb50ec80532c3018152c27b9a0db5 100644 (file)
 
 #include "asic_reg/mp/mp_12_0_0_offset.h"
 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+#include "asic_reg/smuio/smuio_12_0_0_offset.h"
+#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
 
-#define smnMP1_FIRMWARE_FLAGS                                0x3010024
+// because some SMU12 based ASICs use older ip offset tables
+// we should undefine this register from the smuio12 header
+// to prevent confusion down the road
+#undef mmPWR_MISC_CNTL_STATUS
 
-#define mmSMUIO_GFX_MISC_CNTL                                0x00c8
-#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1
+#define smnMP1_FIRMWARE_FLAGS                                0x3010024
 
 int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
@@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
         * Considering above, we just leave user a warning message instead
         * of halt driver loading.
         */
-       if (if_version != smu->smc_if_version) {
+       if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                       smu->smc_if_version, if_version,
+                       smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }
index 868e2d5f6e621e1c04980314de23b6f9180e2353..85e5b1ed22c2993e356b8cd5022cc999806619c3 100644 (file)
@@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
                        break;
        }
-       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.uvd_dpm_enable_mask,
+                               NULL);
 
        return 0;
 }
@@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
                if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
                        break;
        }
-       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.vce_dpm_enable_mask);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.vce_dpm_enable_mask,
+                               NULL);
 
        return 0;
 }
index 32ebb383c4568f623599154761e7356229d12987..ecb9ee46d6b35b8c57bdf705580f197024492fc1 100644 (file)
@@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
        PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
                        INTERRUPTS_ENABLED, 1);
 
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
 
        /* Wait for done bit to be set */
        PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
@@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
        struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
        if (0 != smu_data->avfs_btc_param) {
-               if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+                               NULL)) {
                        pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
                        result = -EINVAL;
                }
@@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
        if (mask)
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                    PPSMC_MSG_LedConfig,
-                                                   mask);
+                                                   mask,
+                                                   NULL);
        return 0;
 }
 
@@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanMinPwm,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
+                               advanceFanControlParameters.ucMinimumPWMLimit,
+                               NULL);
 
        if (!res && hwmgr->thermal_controller.
                        advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanSclkTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+                               NULL);
 
        if (res)
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
        if (!hwmgr->avfs_supported)
                return 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
 
        return 0;
 }
@@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = fiji_update_smc_table,
index 732005c03a82d89f7528ee067ea93c3f259d7195..431ad2fd38df19b2cc2e54e4000acd8df65d4670 100644 (file)
@@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
        .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .get_offsetof = iceland_get_offsetof,
index 23c12018dbc18c582a99610d8575d6555861c7f7..c3d2e6dcf62a09387e3f86f60234a3bea75747a4 100644 (file)
@@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
        struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
        if (0 != smu_data->avfs_btc_param) {
-               if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+                                       NULL)) {
                        pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
                        result = -1;
                }
@@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
                return 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting,
+                       NULL);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
 
        /* Apply avfs cks-off voltages to avoid the overshoot
         * when switching to the highest sclk frequency
         */
        if (data->apply_avfs_cks_off_voltage)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL);
 
        return 0;
 }
@@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanMinPwm,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
+                               advanceFanControlParameters.ucMinimumPWMLimit,
+                               NULL);
 
        if (!res && hwmgr->thermal_controller.
                        advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanSclkTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+                               NULL);
 
        if (res)
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = polaris10_update_smc_table,
index 2319400a3fcb60d046323d422e7c9484ccfeaa32..801ba9ca6094381603c22f5c03f2e80ee9cbfb21 100644 (file)
@@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL;);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL;);
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        /* flush hdp cache */
        amdgpu_asic_flush_hdp(adev, NULL);
@@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        return 0;
 }
@@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
 {
        uint32_t smc_driver_if_version;
 
-       smu10_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetDriverIfVersion);
-       smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetDriverIfVersion,
+                       &smc_driver_if_version);
 
        if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
            (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
@@ -217,8 +223,7 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
        adev->pm.fw_version = hwmgr->smu_version >> 8;
 
        if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
index 3f51d545e8ff3931dcfe57c0f859c464dcdf8adb..aae25243eb10d33a5ea84903ea790e2daa0213d3 100644 (file)
@@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        return 0;
 }
 
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui
        return smu7_send_msg_to_smc(hwmgr, msg);
 }
 
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
 {
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
+       return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 }
 
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
 {
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
-       if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
-               pr_info("Failed to send Message.\n");
-
-       return 0;
+       return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
 }
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
@@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 
        if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
                if (hwmgr->not_vf) {
-                       smu7_send_msg_to_smc_with_parameter(hwmgr,
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SMU_DRAM_ADDR_HI,
-                                               upper_32_bits(smu_data->smu_buffer.mc_addr));
-                       smu7_send_msg_to_smc_with_parameter(hwmgr,
+                                               upper_32_bits(smu_data->smu_buffer.mc_addr),
+                                               NULL);
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SMU_DRAM_ADDR_LO,
-                                               lower_32_bits(smu_data->smu_buffer.mc_addr));
+                                               lower_32_bits(smu_data->smu_buffer.mc_addr),
+                                               NULL);
                }
                fw_to_load = UCODE_ID_RLC_G_MASK
                           + UCODE_ID_SDMA0_MASK
@@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        }
        memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
                    sizeof(struct SMU_DRAMData_TOC));
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
-
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_DRV_DRAM_ADDR_HI,
+                       upper_32_bits(smu_data->header_buffer.mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_DRV_DRAM_ADDR_LO,
+                       lower_32_bits(smu_data->header_buffer.mc_addr),
+                       NULL);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);
 
        r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
        if (!r)
index 01f0538fba6b9e8edc20febbda4178dd4aa545d4..e7303dc8c260b197146fca4d29bfc443c6f0d5e4 100644 (file)
@@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
 int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
 bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
 int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
                                                uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
-                                               uint16_t msg, uint32_t parameter);
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr);
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
index 7dca04a8921730a3eb046275253f9464322f7bbe..76d4f12ceedf3babe753f7f5cab5f507ba684ef6 100644 (file)
@@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
 
        *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrHi,
-                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrLo,
-                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_clock_table);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_clock_table,
+                               NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
 
        return 0;
 }
@@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
                        break;
        }
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrHi,
-                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrLo,
-                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_clock_table);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_clock_table,
+                               NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
 
        return 0;
 }
@@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 
        smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DriverDramAddrHi,
-                                       upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+                                       upper_32_bits(smu8_smu->toc_buffer.mc_addr),
+                                       NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DriverDramAddrLo,
-                                       lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+                                       lower_32_bits(smu8_smu->toc_buffer.mc_addr),
+                                       NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_ExecuteJob,
-                                       smu8_smu->toc_entry_aram);
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_power_profiling_index);
+                                       smu8_smu->toc_entry_aram,
+                                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_power_profiling_index,
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_ExecuteJob,
-                                       smu8_smu->toc_entry_initialize_index);
+                                       smu8_smu->toc_entry_initialize_index,
+                                       NULL);
 
        fw_to_check = UCODE_ID_RLC_G_MASK |
                        UCODE_ID_SDMA0_MASK |
@@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
                                unsigned long check_feature)
 {
        int result;
-       unsigned long features;
+       uint32_t features;
 
-       result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+       result = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_GetFeatureStatus,
+                               0,
+                               &features);
        if (result == 0) {
-               features = smum_get_argument(hwmgr);
                if (features & check_feature)
                        return true;
        }
index 4240aeec9000e9f0e9677d741dfa4d0f180fa196..b6fb480668416a92892f5e6d85122d0a5ac2eb8b 100644 (file)
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
-{
-       if (NULL != hwmgr->smumgr_funcs->get_argument)
-               return hwmgr->smumgr_funcs->get_argument(hwmgr);
-
-       return 0;
-}
-
 uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
 {
        if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
@@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
 {
-       if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
+       int ret = 0;
+
+       if (hwmgr == NULL ||
+           hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+           (resp && !hwmgr->smumgr_funcs->get_argument))
                return -EINVAL;
 
-       return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+       mutex_lock(&hwmgr->msg_lock);
+
+       ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+       if (ret) {
+               mutex_unlock(&hwmgr->msg_lock);
+               return ret;
+       }
+
+       if (resp)
+               *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+       mutex_unlock(&hwmgr->msg_lock);
+
+       return ret;
 }
 
 int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                       uint16_t msg, uint32_t parameter)
+                                       uint16_t msg,
+                                       uint32_t parameter,
+                                       uint32_t *resp)
 {
+       int ret = 0;
+
        if (hwmgr == NULL ||
-               hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+           hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+           (resp && !hwmgr->smumgr_funcs->get_argument))
                return -EINVAL;
-       return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+
+       mutex_lock(&hwmgr->msg_lock);
+
+       ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
                                                hwmgr, msg, parameter);
+       if (ret) {
+               mutex_unlock(&hwmgr->msg_lock);
+               return ret;
+       }
+
+       if (resp)
+               *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+       mutex_unlock(&hwmgr->msg_lock);
+
+       return ret;
 }
 
 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
index f19bac7ef7ba6cfa15dc906a65c1124c605d91f2..398e7e3587de141d455ba95dafc2879754580754 100644 (file)
@@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
                                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = tonga_update_smc_table,
index 715564009089615e6ce9b0dbc86ec2fe78307a9f..1e222c5d91a455e2b7b974ae54dffd49a7c2949e 100644 (file)
@@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        /* flush hdp cache */
        amdgpu_asic_flush_hdp(adev, NULL);
@@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        return 0;
 }
@@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
                return 0;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       msg, feature_mask);
+                       msg, feature_mask, NULL);
 }
 
 int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
                            uint64_t *features_enabled)
 {
+       uint32_t enabled_features;
+
        if (features_enabled == NULL)
                return -EINVAL;
 
-       smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
-       *features_enabled = smu9_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeatures,
+                       &enabled_features);
+       *features_enabled = enabled_features;
 
        return 0;
 }
@@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
        struct vega10_smumgr *priv = hwmgr->smu_backend;
 
        if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
-               smu9_send_msg_to_smc_with_parameter(hwmgr,
+               smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
-               smu9_send_msg_to_smc_with_parameter(hwmgr,
+                               upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+                               NULL);
+               smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrLow,
-                               lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+                               lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+                               NULL);
        }
        return 0;
 }
@@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
        uint32_t dev_id;
        uint32_t rev_id;
 
-       PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetDriverIfVersion),
+       PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetDriverIfVersion,
+                       &smc_driver_if_version),
                        "Attempt to get SMC IF Version Number Failed!",
                        return -EINVAL);
-       smc_driver_if_version = smu9_get_argument(hwmgr);
 
        dev_id = adev->pdev->device;
        rev_id = adev->pdev->revision;
index 275dbf65f1a0c5428e21096948806448db7fbf5e..f54df76537e4b197b246234ba3b00af4a38f069a 100644 (file)
@@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       table_id) == 0,
+                       table_id,
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
                        return -EINVAL);
 
@@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return -EINVAL;);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       table_id) == 0,
+                       table_id,
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
                        return -EINVAL);
 
@@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
        if (enable) {
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
                                return -EINVAL);
        } else {
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
                                return -EINVAL);
        }
@@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesLow,
+                       &smc_features_low) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
                        return -EINVAL);
-       smc_features_low = smu9_get_argument(hwmgr);
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+                       &smc_features_high) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
                        return -EINVAL);
-       smc_features_high = smu9_get_argument(hwmgr);
 
        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
                        (struct vega12_smumgr *)(hwmgr->smu_backend);
 
        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-               if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+               if (!smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
-                       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                               NULL))
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
-                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                                       NULL);
        }
        return 0;
 }
index 16aa171971d3aaa6ad81af88d27f53c385a3fdb7..2fb97554134f5aeaf8592e888cafff9cca236458 100644 (file)
@@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
                        return ret);
 
@@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
                        return ret);
 
@@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableDram2Smu,
+                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
                        return ret);
 
@@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
        struct amdgpu_device *adev = hwmgr->adev;
        int ret = 0;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
                        return ret);
 
@@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
        if (enable) {
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
                                return ret);
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
                                return ret);
        } else {
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
                                return ret);
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
                                return ret);
        }
@@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesLow,
+                       &smc_features_low)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
                        return ret);
-       smc_features_low = vega20_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+                       &smc_features_high)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
                        return ret);
-       smc_features_high = vega20_get_argument(hwmgr);
 
        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-               ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                               NULL);
                if (!ret)
-                       ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
-                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                                       NULL);
        }
 
        return ret;
@@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+                       NULL)) == 0,
                        "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+                       NULL)) == 0,
                        "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
                        return ret);
 
index b0e0d67cd54b3e72987f37604326e6f0f48e9085..3da71a088b925f0d93a37661366b3c0b5fbaadfb 100644 (file)
@@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                            PPSMC_MSG_EnableModeSwitchRLCNotification,
-                                           adev->gfx.cu_info.number);
+                                           adev->gfx.cu_info.number,
+                                           NULL);
 
        return 0;
 }
@@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
                table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_AutomaticDCTransition) &&
-                               !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+                               !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL))
                        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
        } else {
@@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
        if (!hwmgr->avfs_supported)
                return 0;
 
-       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
        if (!ret) {
                if (data->apply_avfs_cks_off_voltage)
-                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+                       ret = smum_send_msg_to_smc(hwmgr,
+                                       PPSMC_MSG_ApplyAvfsCksOffVoltage,
+                                       NULL);
        }
 
        return ret;
@@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .process_firmware_header = vegam_process_firmware_header,
        .is_dpm_running = vegam_is_dpm_running,
        .get_mac_definition = vegam_get_mac_definition,
index 3f1044326dcb6b59d2b66e4f48864715adc2a3d5..61923530b2e4e1439b4c423fabef4064371ab769 100644 (file)
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
                        "PD_Data_error_rate_coeff"};
        int result = 0;
 
-       if (!smu->pm_enabled || !buf)
+       if (!buf)
                return -EINVAL;
 
        size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
 
        smu->power_profile_mode = input[size];
 
-       if (!smu->pm_enabled)
-               return ret;
        if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
                pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
                return -EINVAL;
index d6a6692db0acbdb645b56e910074b7dce852bcc4..c05d001163e0e8e6b63fc091318b5be632afc9a7 100644 (file)
@@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = {
        { "clocks", arcpgu_show_pxlclock, 0 },
 };
 
-static int arcpgu_debugfs_init(struct drm_minor *minor)
+static void arcpgu_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(arcpgu_debugfs_list,
-               ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+       drm_debugfs_create_files(arcpgu_debugfs_list,
+                                ARRAY_SIZE(arcpgu_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 442d4656150ae664b8e1ebc3a511beec83505126..6b85d5f4caa854fd4b3484f562fdb67928ad0222 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -260,17 +261,16 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
 
 struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 {
-       struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+       struct komeda_kms_dev *kms;
        struct drm_device *drm;
        int err;
 
-       if (!kms)
-               return ERR_PTR(-ENOMEM);
+       kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
+                                struct komeda_kms_dev, base);
+       if (IS_ERR(kms))
+               return kms;
 
        drm = &kms->base;
-       err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
-       if (err)
-               goto free_kms;
 
        drm->dev_private = mdev;
 
@@ -327,9 +327,6 @@ cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
-       drm_dev_put(drm);
-free_kms:
-       kfree(kms);
        return ERR_PTR(err);
 }
 
@@ -346,5 +343,4 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
-       drm_dev_put(drm);
 }
index 2e053815b54aa890f677ffda5332c2555b47db65..194419f47c5e5d71e7f61d9f9b99b963c95637a9 100644 (file)
@@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = {
        { "clocks", hdlcd_show_pxlclock, 0 },
 };
 
-static int hdlcd_debugfs_init(struct drm_minor *minor)
+static void hdlcd_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(hdlcd_debugfs_list,
-               ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+       drm_debugfs_create_files(hdlcd_debugfs_list,
+                                ARRAY_SIZE(hdlcd_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 37d92a06318e4d4f8e29540d90dbcec49c3afafb..def8c9ffafcaf5ad4f95438af5178ba0b96e712c 100644 (file)
@@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = {
        .release = single_release,
 };
 
-static int malidp_debugfs_init(struct drm_minor *minor)
+static void malidp_debugfs_init(struct drm_minor *minor)
 {
        struct malidp_drm *malidp = minor->dev->dev_private;
 
@@ -557,7 +557,6 @@ static int malidp_debugfs_init(struct drm_minor *minor)
        spin_lock_init(&malidp->errors_lock);
        debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
                            minor->dev, &malidp_debugfs_fops);
-       return 0;
 }
 
 #endif //CONFIG_DEBUG_FS
index 197dca3fc84c2d71d8220d3f797f03e6daf3e351..dd9ed71ed942811d6ae958b1f26cadfd6fe26a17 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev)
                kfree(priv);
                return ret;
        }
+       drmm_add_final_kfree(&priv->drm, priv);
 
        /* Remove early framebuffers */
        ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
index a10358bb61ec4578cd84f9c526f7196a5a3e866d..e7ca95827ae89658870223337887adda545b51e9 100644 (file)
@@ -5,6 +5,7 @@
 #include <drm/drm_simple_kms_helper.h>
 
 struct aspeed_gfx {
+       struct drm_device               drm;
        void __iomem                    *base;
        struct clk                      *clk;
        struct reset_control            *rst;
@@ -12,8 +13,8 @@ struct aspeed_gfx {
 
        struct drm_simple_display_pipe  pipe;
        struct drm_connector            connector;
-       struct drm_fbdev_cma            *fbdev;
 };
+#define to_aspeed_gfx(x) container_of(x, struct aspeed_gfx, drm)
 
 int aspeed_gfx_create_pipe(struct drm_device *drm);
 int aspeed_gfx_create_output(struct drm_device *drm);
index 2184b8be6fd4f1db8be6f451825787a2e81a25a8..e54686c31a90decf023d70f812a112302495bfc4 100644 (file)
@@ -231,7 +231,7 @@ static const uint32_t aspeed_gfx_formats[] = {
 
 int aspeed_gfx_create_pipe(struct drm_device *drm)
 {
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
 
        return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
                                            aspeed_gfx_formats,
index ada2f6aca906c9a72ab9b68d8770935933068933..6b27242b9ee3c1a55b856bb58d456c5307e534a3 100644 (file)
@@ -77,7 +77,7 @@ static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
 static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
 {
        struct drm_device *drm = data;
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        u32 reg;
 
        reg = readl(priv->base + CRT_CTRL1);
@@ -96,15 +96,10 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
 static int aspeed_gfx_load(struct drm_device *drm)
 {
        struct platform_device *pdev = to_platform_device(drm->dev);
-       struct aspeed_gfx *priv;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        struct resource *res;
        int ret;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-       drm->dev_private = priv;
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(drm->dev, res);
        if (IS_ERR(priv->base))
@@ -187,8 +182,6 @@ static void aspeed_gfx_unload(struct drm_device *drm)
 {
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
-
-       drm->dev_private = NULL;
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(fops);
@@ -216,27 +209,26 @@ static const struct of_device_id aspeed_gfx_match[] = {
 
 static int aspeed_gfx_probe(struct platform_device *pdev)
 {
-       struct drm_device *drm;
+       struct aspeed_gfx *priv;
        int ret;
 
-       drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
-       if (IS_ERR(drm))
-               return PTR_ERR(drm);
+       priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
+                                 struct aspeed_gfx, drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
-       ret = aspeed_gfx_load(drm);
+       ret = aspeed_gfx_load(&priv->drm);
        if (ret)
-               goto err_free;
+               return ret;
 
-       ret = drm_dev_register(drm, 0);
+       ret = drm_dev_register(&priv->drm, 0);
        if (ret)
                goto err_unload;
 
        return 0;
 
 err_unload:
-       aspeed_gfx_unload(drm);
-err_free:
-       drm_dev_put(drm);
+       aspeed_gfx_unload(&priv->drm);
 
        return ret;
 }
@@ -247,7 +239,6 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
 
        drm_dev_unregister(drm);
        aspeed_gfx_unload(drm);
-       drm_dev_put(drm);
 
        return 0;
 }
index 67ee5fa10055b02c1984483b2a6eb5fcb88067dc..6759cb88415a4ca3e944ed64869c2e7963ef1833 100644 (file)
@@ -28,7 +28,7 @@ static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
 
 int aspeed_gfx_create_output(struct drm_device *drm)
 {
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        int ret;
 
        priv->connector.dpms = DRM_MODE_DPMS_OFF;
index 30aa73a5d9b723f3400e330c7e681487d67bd9aa..b7ba22dddcad98c8c3056c978b5d239a7b3ce9b2 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_probe_helper.h>
 
@@ -111,6 +112,8 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_ast_driver_unload;
 
+       drm_fbdev_generic_setup(dev, 32);
+
        return 0;
 
 err_ast_driver_unload:
index 18a0a4ce00f6e0b823fd24f080085e747049d73e..e5398e3dabe708fe8f778e9b574da4a584d47ed3 100644 (file)
@@ -30,7 +30,6 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_vram_helper.h>
@@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_reset(dev);
 
-       ret = drm_fbdev_generic_setup(dev, 32);
-       if (ret)
-               goto out_free;
-
        return 0;
 out_free:
        kfree(ast);
index cdd6c46d6557d529d473cee8540f29e2d4cae603..7d39b858c9f1fbbfe68b6d2f0ad353f46b9ed99a 100644 (file)
@@ -561,8 +561,9 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
        return 0;
 }
 
-void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
-                                           struct drm_plane_state *old_state)
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+                                      struct drm_plane_state *old_state)
 {
        struct ast_private *ast = plane->dev->dev_private;
        struct drm_plane_state *state = plane->state;
@@ -801,6 +802,9 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
+       if (!state->enable)
+               return 0; /* no mode checks if CRTC is being disabled */
+
        ast_state = to_ast_crtc_state(state);
 
        format = ast_state->format;
@@ -881,6 +885,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
        .atomic_disable = ast_crtc_helper_atomic_disable,
 };
 
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+       struct ast_crtc_state *ast_state =
+               kzalloc(sizeof(*ast_state), GFP_KERNEL);
+
+       if (crtc->state)
+               crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+       __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+}
+
 static void ast_crtc_destroy(struct drm_crtc *crtc)
 {
        drm_crtc_cleanup(crtc);
@@ -919,8 +934,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs ast_crtc_funcs = {
-       .reset = drm_atomic_helper_crtc_reset,
-       .set_config = drm_crtc_helper_set_config,
+       .reset = ast_crtc_reset,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .destroy = ast_crtc_destroy,
        .set_config = drm_atomic_helper_set_config,
@@ -1069,7 +1083,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
 {
        struct ast_connector *ast_connector = to_ast_connector(connector);
        ast_i2c_destroy(ast_connector->i2c);
-       drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -1112,8 +1125,6 @@ static int ast_connector_init(struct drm_device *dev)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       drm_connector_register(connector);
-
        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
index e2019fe97fff5b6bfd0864f6bd22dbee03f60721..43bc709e35237d8d9158f729f0d09ed29592e799 100644 (file)
 #include <linux/media-bus-format.h>
 #include <linux/of_graph.h>
 
+#include <drm/drm_bridge.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "atmel_hlcdc_dc.h"
 
@@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output {
        int bus_fmt;
 };
 
-static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static struct atmel_hlcdc_rgb_output *
 atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
 {
@@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
                return -EINVAL;
        }
 
-       ret = drm_encoder_init(dev, &output->encoder,
-                              &atmel_hlcdc_panel_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(dev, &output->encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret)
                return ret;
 
index 917767173ee6836deea7b5e2bfa704c0aa77ea2d..e5bd1d517a18d09398b6408e98fdbaf467c370c5 100644 (file)
@@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs);
 
 /* bochs_kms.c */
 int bochs_kms_init(struct bochs_device *bochs);
-void bochs_kms_fini(struct bochs_device *bochs);
 
 /* bochs_fbdev.c */
 extern const struct drm_mode_config_funcs bochs_mode_funcs;
index addb0568c1affef3e5cffb9d53783006d13e0864..e18c51de11969b370da55a63a2b3ff11fa982f1a 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <drm/drm_drv.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
 
 #include "bochs.h"
 
@@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev)
 {
        struct bochs_device *bochs = dev->dev_private;
 
-       bochs_kms_fini(bochs);
        bochs_mm_fini(bochs);
-       kfree(bochs);
-       dev->dev_private = NULL;
 }
 
 static int bochs_load(struct drm_device *dev)
@@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev)
        struct bochs_device *bochs;
        int ret;
 
-       bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+       bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
        if (bochs == NULL)
                return -ENOMEM;
        dev->dev_private = bochs;
index 8066d7d370d5b5cf50932e6cfcecfa6762c19347..05d8373888e81d48250c618f2e821555436062c1 100644 (file)
@@ -104,7 +104,6 @@ static void bochs_connector_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_VIRTUAL);
        drm_connector_helper_add(connector,
                                 &bochs_connector_connector_helper_funcs);
-       drm_connector_register(connector);
 
        bochs_hw_load_edid(bochs);
        if (bochs->edid) {
@@ -134,7 +133,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
 
 int bochs_kms_init(struct bochs_device *bochs)
 {
-       drm_mode_config_init(bochs->dev);
+       int ret;
+
+       ret = drmm_mode_config_init(bochs->dev);
+       if (ret)
+               return ret;
 
        bochs->dev->mode_config.max_width = 8192;
        bochs->dev->mode_config.max_height = 8192;
@@ -160,12 +163,3 @@ int bochs_kms_init(struct bochs_device *bochs)
 
        return 0;
 }
-
-void bochs_kms_fini(struct bochs_device *bochs)
-{
-       if (!bochs->dev->mode_config.num_connector)
-               return;
-
-       drm_atomic_helper_shutdown(bochs->dev);
-       drm_mode_config_cleanup(bochs->dev);
-}
index aaed2347ace9d8112ae508f6e9ee93b1027e8468..04f876e985deafd9f369e65d2645e855cb90b77c 100644 (file)
@@ -27,6 +27,16 @@ config DRM_CDNS_DSI
          Support Cadence DPI to DSI bridge. This is an internal
          bridge and is meant to be directly embedded in a SoC.
 
+config DRM_CHRONTEL_CH7033
+       tristate "Chrontel CH7033 Video Encoder"
+       depends on OF
+       select DRM_KMS_HELPER
+       help
+         Enable support for the Chrontel CH7033 VGA/DVI/HDMI Encoder, as
+         found in the Dell Wyse 3020 thin client.
+
+         If in doubt, say "N".
+
 config DRM_DISPLAY_CONNECTOR
        tristate "Display connector support"
        depends on OF
@@ -58,6 +68,22 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
          to DP++. This is used with the i.MX6 imx-ldb
          driver. You are likely to say N here.
 
+config DRM_NWL_MIPI_DSI
+       tristate "Northwest Logic MIPI DSI Host controller"
+       depends on DRM
+       depends on COMMON_CLK
+       depends on OF && HAS_IOMEM
+       select DRM_KMS_HELPER
+       select DRM_MIPI_DSI
+       select DRM_PANEL_BRIDGE
+       select GENERIC_PHY_MIPI_DPHY
+       select MFD_SYSCON
+       select MULTIPLEXER
+       select REGMAP_MMIO
+       help
+         This enables the Northwest Logic MIPI DSI Host controller as
+         for example found on NXP's i.MX8 Processors.
+
 config DRM_NXP_PTN3460
        tristate "NXP PTN3460 DP/LVDS bridge"
        depends on OF
index 6fb062b5b0f04e493dcb6cf6a89cefc10ff3ddad..d63d4b7e434733f6fb8cbea58edbf6b8c168bf98 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
 obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
 obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
 obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
 
 obj-y += analogix/
 obj-y += synopsys/
index 47d4eb9e845d085cedd1bf8c9ddb9a1bca92fd8d..f46a5e26b5dd640670afa21802f9019d5c7439fb 100644 (file)
@@ -6,7 +6,7 @@ config DRM_I2C_ADV7511
        select REGMAP_I2C
        select DRM_MIPI_DSI
        help
-         Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
+         Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
 
 config DRM_I2C_ADV7511_AUDIO
        bool "ADV7511 HDMI Audio driver"
index a428185be2c1576c6bf68e9d55a80ee470795c41..f101dd2819b5290dbb2992c3d04aea0432c14182 100644 (file)
@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
 {
        switch (fs) {
        case 32000:
-               *n = 4096;
+       case 48000:
+       case 96000:
+       case 192000:
+               *n = fs * 128 / 1000;
                break;
        case 44100:
-               *n = 6272;
-               break;
-       case 48000:
-               *n = 6144;
+       case 88200:
+       case 176400:
+               *n = fs * 128 / 900;
                break;
        }
 
@@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
                audio_source = ADV7511_AUDIO_SOURCE_I2S;
                i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
                break;
+       case HDMI_SPDIF:
+               audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+               break;
        default:
                return -EINVAL;
        }
@@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data)
        /* use Audio infoframe updated info */
        regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
                                BIT(5), 0);
+       /* enable SPDIF receiver */
+       if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                  BIT(7), BIT(7));
+
        return 0;
 }
 
 static void audio_shutdown(struct device *dev, void *data)
 {
+       struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+       if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                  BIT(7), 0);
 }
 
 static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = {
        .ops = &adv7511_codec_ops,
        .max_i2s_channels = 2,
        .i2s = 1,
+       .spdif = 1,
 };
 
 int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
new file mode 100644 (file)
index 0000000..f8675d8
--- /dev/null
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Chrontel CH7033 Video Encoder Driver
+ *
+ * Copyright (C) 2019,2020 Lubomir Rintel
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* Page 0, Register 0x07 */
+enum {
+       DRI_PD          = BIT(3),
+       IO_PD           = BIT(5),
+};
+
+/* Page 0, Register 0x08 */
+enum {
+       DRI_PDDRI       = GENMASK(7, 4),
+       PDDAC           = GENMASK(3, 1),
+       PANEN           = BIT(0),
+};
+
+/* Page 0, Register 0x09 */
+enum {
+       DPD             = BIT(7),
+       GCKOFF          = BIT(6),
+       TV_BP           = BIT(5),
+       SCLPD           = BIT(4),
+       SDPD            = BIT(3),
+       VGA_PD          = BIT(2),
+       HDBKPD          = BIT(1),
+       HDMI_PD         = BIT(0),
+};
+
+/* Page 0, Register 0x0a */
+enum {
+       MEMINIT         = BIT(7),
+       MEMIDLE         = BIT(6),
+       MEMPD           = BIT(5),
+       STOP            = BIT(4),
+       LVDS_PD         = BIT(3),
+       HD_DVIB         = BIT(2),
+       HDCP_PD         = BIT(1),
+       MCU_PD          = BIT(0),
+};
+
+/* Page 0, Register 0x18 */
+enum {
+       IDF             = GENMASK(7, 4),
+       INTEN           = BIT(3),
+       SWAP            = GENMASK(2, 0),
+};
+
+enum {
+       BYTE_SWAP_RGB   = 0,
+       BYTE_SWAP_RBG   = 1,
+       BYTE_SWAP_GRB   = 2,
+       BYTE_SWAP_GBR   = 3,
+       BYTE_SWAP_BRG   = 4,
+       BYTE_SWAP_BGR   = 5,
+};
+
+/* Page 0, Register 0x19 */
+enum {
+       HPO_I           = BIT(5),
+       VPO_I           = BIT(4),
+       DEPO_I          = BIT(3),
+       CRYS_EN         = BIT(2),
+       GCLKFREQ        = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2e */
+enum {
+       HFLIP           = BIT(7),
+       VFLIP           = BIT(6),
+       DEPO_O          = BIT(5),
+       HPO_O           = BIT(4),
+       VPO_O           = BIT(3),
+       TE              = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2b */
+enum {
+       SWAPS           = GENMASK(7, 4),
+       VFMT            = GENMASK(3, 0),
+};
+
+/* Page 0, Register 0x54 */
+enum {
+       COMP_BP         = BIT(7),
+       DAC_EN_T        = BIT(6),
+       HWO_HDMI_HI     = GENMASK(5, 3),
+       HOO_HDMI_HI     = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x57 */
+enum {
+       FLDSEN          = BIT(7),
+       VWO_HDMI_HI     = GENMASK(5, 3),
+       VOO_HDMI_HI     = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x7e */
+enum {
+       HDMI_LVDS_SEL   = BIT(7),
+       DE_GEN          = BIT(6),
+       PWM_INDEX_HI    = BIT(5),
+       USE_DE          = BIT(4),
+       R_INT           = GENMASK(3, 0),
+};
+
+/* Page 1, Register 0x07 */
+enum {
+       BPCKSEL         = BIT(7),
+       DRI_CMFB_EN     = BIT(6),
+       CEC_PUEN        = BIT(5),
+       CEC_T           = BIT(3),
+       CKINV           = BIT(2),
+       CK_TVINV        = BIT(1),
+       DRI_CKS2        = BIT(0),
+};
+
+/* Page 1, Register 0x08 */
+enum {
+       DACG            = BIT(6),
+       DACKTST         = BIT(5),
+       DEDGEB          = BIT(4),
+       SYO             = BIT(3),
+       DRI_IT_LVDS     = GENMASK(2, 1),
+       DISPON          = BIT(0),
+};
+
+/* Page 1, Register 0x0c */
+enum {
+       DRI_PLL_CP      = GENMASK(7, 6),
+       DRI_PLL_DIVSEL  = BIT(5),
+       DRI_PLL_N1_1    = BIT(4),
+       DRI_PLL_N1_0    = BIT(3),
+       DRI_PLL_N3_1    = BIT(2),
+       DRI_PLL_N3_0    = BIT(1),
+       DRI_PLL_CKTSTEN = BIT(0),
+};
+
+/* Page 1, Register 0x6b */
+enum {
+       VCO3CS          = GENMASK(7, 6),
+       ICPGBK2_0       = GENMASK(5, 3),
+       DRI_VCO357SC    = BIT(2),
+       PDPLL2          = BIT(1),
+       DRI_PD_SER      = BIT(0),
+};
+
+/* Page 1, Register 0x6c */
+enum {
+       PLL2N11         = GENMASK(7, 4),
+       PLL2N5_4        = BIT(3),
+       PLL2N5_TOP      = BIT(2),
+       DRI_PLL_PD      = BIT(1),
+       PD_I2CM         = BIT(0),
+};
+
+/* Page 3, Register 0x28 */
+enum {
+       DIFF_EN         = GENMASK(7, 6),
+       CORREC_EN       = GENMASK(5, 4),
+       VGACLK_BP       = BIT(3),
+       HM_LV_SEL       = BIT(2),
+       HD_VGA_SEL      = BIT(1),
+};
+
+/* Page 3, Register 0x2a */
+enum {
+       LVDSCLK_BP      = BIT(7),
+       HDTVCLK_BP      = BIT(6),
+       HDMICLK_BP      = BIT(5),
+       HDTV_BP         = BIT(4),
+       HDMI_BP         = BIT(3),
+       THRWL           = GENMASK(2, 0),
+};
+
+/* Page 4, Register 0x52 */
+enum {
+       PGM_ARSTB       = BIT(7),
+       MCU_ARSTB       = BIT(6),
+       MCU_RETB        = BIT(2),
+       RESETIB         = BIT(1),
+       RESETDB         = BIT(0),
+};
+
+struct ch7033_priv {
+       struct regmap *regmap;
+       struct drm_bridge *next_bridge;
+       struct drm_bridge bridge;
+       struct drm_connector connector;
+};
+
+#define conn_to_ch7033_priv(x) \
+       container_of(x, struct ch7033_priv, connector)
+#define bridge_to_ch7033_priv(x) \
+       container_of(x, struct ch7033_priv, bridge)
+
+
+static enum drm_connector_status ch7033_connector_detect(
+       struct drm_connector *connector, bool force)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+       return drm_bridge_detect(priv->next_bridge);
+}
+
+static const struct drm_connector_funcs ch7033_connector_funcs = {
+       .reset = drm_atomic_helper_connector_reset,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .detect = ch7033_connector_detect,
+       .destroy = drm_connector_cleanup,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ch7033_connector_get_modes(struct drm_connector *connector)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+       struct edid *edid;
+       int ret;
+
+       edid = drm_bridge_get_edid(priv->next_bridge, connector);
+       drm_connector_update_edid_property(connector, edid);
+       if (edid) {
+               ret = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       } else {
+               ret = drm_add_modes_noedid(connector, 1920, 1080);
+               drm_set_preferred_mode(connector, 1024, 768);
+       }
+
+       return ret;
+}
+
+static struct drm_encoder *ch7033_connector_best_encoder(
+                       struct drm_connector *connector)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+       return priv->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = {
+       .get_modes = ch7033_connector_get_modes,
+       .best_encoder = ch7033_connector_best_encoder,
+};
+
+static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
+{
+       struct ch7033_priv *priv = arg;
+
+       if (priv->bridge.dev)
+               drm_helper_hpd_irq_event(priv->connector.dev);
+}
+
+static int ch7033_bridge_attach(struct drm_bridge *bridge,
+                               enum drm_bridge_attach_flags flags)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+       struct drm_connector *connector = &priv->connector;
+       int ret;
+
+       ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret)
+               return ret;
+
+       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+               return 0;
+
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) {
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+       } else {
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                   DRM_CONNECTOR_POLL_DISCONNECT;
+       }
+
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+               drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event,
+                                     priv);
+       }
+
+       drm_connector_helper_add(connector,
+                                &ch7033_connector_helper_funcs);
+       ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector,
+                                         &ch7033_connector_funcs,
+                                         priv->next_bridge->type,
+                                         priv->next_bridge->ddc);
+       if (ret) {
+               DRM_ERROR("Failed to initialize connector\n");
+               return ret;
+       }
+
+       return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+}
+
+static void ch7033_bridge_detach(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD)
+               drm_bridge_hpd_disable(priv->next_bridge);
+       drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge,
+                                    const struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+       if (mode->hdisplay >= 1920)
+               return MODE_BAD_HVALUE;
+       if (mode->vdisplay >= 1080)
+               return MODE_BAD_VVALUE;
+       return MODE_OK;
+}
+
+static void ch7033_bridge_disable(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       regmap_write(priv->regmap, 0x03, 0x04);
+       regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00);
+}
+
+static void ch7033_bridge_enable(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       regmap_write(priv->regmap, 0x03, 0x04);
+       regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB);
+}
+
+static void ch7033_bridge_mode_set(struct drm_bridge *bridge,
+                                  const struct drm_display_mode *mode,
+                                  const struct drm_display_mode *adjusted_mode)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+       int hbporch = mode->hsync_start - mode->hdisplay;
+       int hsynclen = mode->hsync_end - mode->hsync_start;
+       int vbporch = mode->vsync_start - mode->vdisplay;
+       int vsynclen = mode->vsync_end - mode->vsync_start;
+
+       /*
+        * Page 4
+        */
+       regmap_write(priv->regmap, 0x03, 0x04);
+
+       /* Turn everything off to set all the registers to their defaults. */
+       regmap_write(priv->regmap, 0x52, 0x00);
+       /* Bring I/O block up. */
+       regmap_write(priv->regmap, 0x52, RESETIB);
+
+       /*
+        * Page 0
+        */
+       regmap_write(priv->regmap, 0x03, 0x00);
+
+       /* Bring up parts we need from the power down. */
+       regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0);
+       regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0);
+       regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF |
+                                              HDMI_PD | VGA_PD, 0);
+       regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0);
+
+       /* Horizontal input timing. */
+       regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 |
+                                        (mode->hdisplay >> 8));
+       regmap_write(priv->regmap, 0x0c, mode->hdisplay);
+       regmap_write(priv->regmap, 0x0d, mode->htotal);
+       regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 |
+                                        (hbporch >> 8));
+       regmap_write(priv->regmap, 0x0f, hbporch);
+       regmap_write(priv->regmap, 0x10, hsynclen);
+
+       /* Vertical input timing. */
+       regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 |
+                                        (mode->vdisplay >> 8));
+       regmap_write(priv->regmap, 0x12, mode->vdisplay);
+       regmap_write(priv->regmap, 0x13, mode->vtotal);
+       regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) |
+                                        (vbporch >> 8));
+       regmap_write(priv->regmap, 0x15, vbporch);
+       regmap_write(priv->regmap, 0x16, vsynclen);
+
+       /* Input color swap. */
+       regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR);
+
+       /* Input clock and sync polarity. */
+       regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16);
+       regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ,
+                          (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0 |
+                          (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0 |
+                          mode->clock >> 16);
+       regmap_write(priv->regmap, 0x1a, mode->clock >> 8);
+       regmap_write(priv->regmap, 0x1b, mode->clock);
+
+       /* Horizontal output timing. */
+       regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 |
+                                        (mode->hdisplay >> 8));
+       regmap_write(priv->regmap, 0x20, mode->hdisplay);
+       regmap_write(priv->regmap, 0x21, mode->htotal);
+
+       /* Vertical output timing. */
+       regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 |
+                                        (mode->vdisplay >> 8));
+       regmap_write(priv->regmap, 0x26, mode->vdisplay);
+       regmap_write(priv->regmap, 0x27, mode->vtotal);
+
+       /* VGA channel bypass */
+       regmap_update_bits(priv->regmap, 0x2b, VFMT, 9);
+
+       /* Output sync polarity. */
+       regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O,
+                          (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0 |
+                          (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0);
+
+       /* HDMI horizontal output timing. */
+       regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI,
+                                              (hsynclen >> 8) << 3 |
+                                              (hbporch >> 8));
+       regmap_write(priv->regmap, 0x55, hbporch);
+       regmap_write(priv->regmap, 0x56, hsynclen);
+
+       /* HDMI vertical output timing. */
+       regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI,
+                                              (vsynclen >> 8) << 3 |
+                                              (vbporch >> 8));
+       regmap_write(priv->regmap, 0x58, vbporch);
+       regmap_write(priv->regmap, 0x59, vsynclen);
+
+       /* Pick HDMI, not LVDS. */
+       regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL);
+
+       /*
+        * Page 1
+        */
+       regmap_write(priv->regmap, 0x03, 0x01);
+
+       /* No idea what these do, but VGA is wobbly and blinky without them. */
+       regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV);
+       regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON);
+
+       /* DRI PLL */
+       regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL);
+       if (mode->clock <= 40000) {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      0);
+       } else if (mode->clock < 80000) {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      DRI_PLL_N3_0 |
+                                                      DRI_PLL_N1_0);
+       } else {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N1_1);
+       }
+
+       /* This seems to be color calibration for VGA. */
+       regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */
+       regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */
+       regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */
+       regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */
+       regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */
+       regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */
+
+       regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00);
+       regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00);
+
+       /*
+        * Page 3
+        */
+       regmap_write(priv->regmap, 0x03, 0x03);
+
+       /* More bypasses and apparently another HDMI/LVDS selector. */
+       regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL,
+                                              VGACLK_BP | HM_LV_SEL);
+       regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP,
+                                              HDMICLK_BP | HDMI_BP);
+
+       /*
+        * Page 4
+        */
+       regmap_write(priv->regmap, 0x03, 0x04);
+
+       /* Output clock. */
+       regmap_write(priv->regmap, 0x10, mode->clock >> 16);
+       regmap_write(priv->regmap, 0x11, mode->clock >> 8);
+       regmap_write(priv->regmap, 0x12, mode->clock);
+}
+
+static const struct drm_bridge_funcs ch7033_bridge_funcs = {
+       .attach = ch7033_bridge_attach,
+       .detach = ch7033_bridge_detach,
+       .mode_valid = ch7033_bridge_mode_valid,
+       .disable = ch7033_bridge_disable,
+       .enable = ch7033_bridge_enable,
+       .mode_set = ch7033_bridge_mode_set,
+};
+
+static const struct regmap_config ch7033_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0x7f,
+};
+
+static int ch7033_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct ch7033_priv *priv;
+       unsigned int val;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, priv);
+
+       ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+                                         &priv->next_bridge);
+       if (ret)
+               return ret;
+
+       priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(&client->dev, "regmap init failed\n");
+               return PTR_ERR(priv->regmap);
+       }
+
+       ret = regmap_read(priv->regmap, 0x00, &val);
+       if (ret < 0) {
+               dev_err(&client->dev, "error reading the model id: %d\n", ret);
+               return ret;
+       }
+       if ((val & 0xf7) != 0x56) {
+               dev_err(&client->dev, "the device is not a ch7033\n");
+               return -ENODEV;
+       }
+
+       regmap_write(priv->regmap, 0x03, 0x04);
+       ret = regmap_read(priv->regmap, 0x51, &val);
+       if (ret < 0) {
+               dev_err(&client->dev, "error reading the model id: %d\n", ret);
+               return ret;
+       }
+       if ((val & 0x0f) != 3) {
+               dev_err(&client->dev, "unknown revision %u\n", val);
+               return -ENODEV;
+       }
+
+       INIT_LIST_HEAD(&priv->bridge.list);
+       priv->bridge.funcs = &ch7033_bridge_funcs;
+       priv->bridge.of_node = dev->of_node;
+       drm_bridge_add(&priv->bridge);
+
+       dev_info(dev, "Chrontel CH7033 Video Encoder\n");
+       return 0;
+}
+
+static int ch7033_remove(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct ch7033_priv *priv = dev_get_drvdata(dev);
+
+       drm_bridge_remove(&priv->bridge);
+
+       return 0;
+}
+
+static const struct of_device_id ch7033_dt_ids[] = {
+       { .compatible = "chrontel,ch7033", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
+
+static const struct i2c_device_id ch7033_ids[] = {
+       { "ch7033", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7033_ids);
+
+static struct i2c_driver ch7033_driver = {
+       .probe = ch7033_probe,
+       .remove = ch7033_remove,
+       .driver = {
+               .name = "ch7033",
+               .of_match_table = of_match_ptr(ch7033_dt_ids),
+       },
+       .id_table = ch7033_ids,
+};
+
+module_i2c_driver(ch7033_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
new file mode 100644 (file)
index 0000000..b14d725
--- /dev/null
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2020 Purism SPC
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/time64.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+#include "nwl-dsi.h"
+
+#define DRV_NAME "nwl-dsi"
+
+/* i.MX8 NWL quirks */
+/* i.MX8MQ errata E11418 */
+#define E11418_HS_MODE_QUIRK   BIT(0)
+
+/* How long a single command/read transfer may take before we give up. */
+#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
+
+enum transfer_direction {
+       DSI_PACKET_SEND,
+       DSI_PACKET_RECEIVE,
+};
+
+#define NWL_DSI_ENDPOINT_LCDIF 0
+#define NWL_DSI_ENDPOINT_DCSS 1
+
+/*
+ * NOTE(review): not referenced anywhere in this chunk; presumably used by
+ * platform/clock setup code elsewhere in the file — verify before removal.
+ */
+struct nwl_dsi_plat_clk_config {
+       const char *id;
+       struct clk *clk;
+       bool present;
+};
+
+/* State of one in-flight DSI command transfer (lives on the caller stack). */
+struct nwl_dsi_transfer {
+       const struct mipi_dsi_msg *msg;
+       struct mipi_dsi_packet packet;
+       struct completion completed;
+
+       int status; /* status of transmission */
+       enum transfer_direction direction;
+       bool need_bta;
+       u8 cmd;
+       u16 rx_word_count;
+       size_t tx_len; /* in bytes */
+       size_t rx_len; /* in bytes */
+};
+
+/* Per-device driver state. */
+struct nwl_dsi {
+       struct drm_bridge bridge;
+       struct mipi_dsi_host dsi_host;
+       struct drm_bridge *panel_bridge;
+       struct device *dev;
+       struct phy *phy;
+       union phy_configure_opts phy_cfg;
+       unsigned int quirks;
+
+       struct regmap *regmap;
+       int irq;
+       /*
+        * The DSI host controller needs this reset sequence according to NWL:
+        * 1. Deassert pclk reset to get access to DSI regs
+        * 2. Configure DSI Host and DPHY and enable DPHY
+        * 3. Deassert ESC and BYTE resets to allow host TX operations)
+        * 4. Send DSI cmds to configure peripheral (handled by panel drv)
+        * 5. Deassert DPI reset so DPI receives pixels and starts sending
+        *    DSI data
+        *
+        * TODO: Since panel_bridges do their DSI setup in enable we
+        * currently have 4. and 5. swapped.
+        */
+       struct reset_control *rst_byte;
+       struct reset_control *rst_esc;
+       struct reset_control *rst_dpi;
+       struct reset_control *rst_pclk;
+       struct mux_control *mux;
+
+       /* DSI clocks */
+       struct clk *phy_ref_clk;
+       struct clk *rx_esc_clk;
+       struct clk *tx_esc_clk;
+       struct clk *core_clk;
+       /*
+        * hardware bug: the i.MX8MQ needs this clock on during reset
+        * even when not using LCDIF.
+        */
+       struct clk *lcdif_clk;
+
+       /* dsi lanes */
+       u32 lanes;
+       enum mipi_dsi_pixel_format format;
+       struct drm_display_mode mode;
+       unsigned long dsi_mode_flags;
+       int error; /* sticky error latched by nwl_dsi_read/write */
+
+       struct nwl_dsi_transfer *xfer;
+};
+
+/* 16-bit register addresses, 32-bit values, 4-byte stride. */
+static const struct regmap_config nwl_dsi_regmap_config = {
+       .reg_bits = 16,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = NWL_DSI_IRQ_MASK2,
+       .name = DRV_NAME,
+};
+
+/* Map a drm_bridge pointer back to its containing nwl_dsi instance. */
+static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct nwl_dsi, bridge);
+}
+
+/*
+ * Return the sticky error accumulated by nwl_dsi_read()/nwl_dsi_write()
+ * and reset it, so a whole sequence of register accesses can be checked
+ * with a single call.
+ */
+static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
+{
+       int ret = dsi->error;
+
+       dsi->error = 0;
+       return ret;
+}
+
+/*
+ * Write a host controller register.  Becomes a no-op once dsi->error is
+ * set, so back-to-back writes only need one nwl_dsi_clear_error() check
+ * at the end.
+ */
+static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
+{
+       int ret;
+
+       if (dsi->error)
+               return;
+
+       ret = regmap_write(dsi->regmap, reg, val);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev,
+                             "Failed to write NWL DSI reg 0x%x: %d\n", reg,
+                             ret);
+               dsi->error = ret;
+       }
+}
+
+/*
+ * Read a host controller register.  Returns 0 without touching the
+ * hardware if a previous access already failed (sticky dsi->error), and
+ * latches any new failure into dsi->error.
+ */
+static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
+{
+       /* Initialized: regmap_read() leaves val untouched on failure. */
+       unsigned int val = 0;
+       int ret;
+
+       if (dsi->error)
+               return 0;
+
+       ret = regmap_read(dsi->regmap, reg, &val);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
+                             reg, ret);
+               dsi->error = ret;
+       }
+       return val;
+}
+
+/*
+ * Translate a MIPI DSI pixel format into the NWL DPI register encoding,
+ * or -EINVAL for formats the controller cannot output.
+ */
+static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
+{
+       switch (format) {
+       case MIPI_DSI_FMT_RGB565:
+               return NWL_DSI_PIXEL_FORMAT_16;
+       case MIPI_DSI_FMT_RGB666:
+               return NWL_DSI_PIXEL_FORMAT_18L;
+       case MIPI_DSI_FMT_RGB666_PACKED:
+               return NWL_DSI_PIXEL_FORMAT_18;
+       case MIPI_DSI_FMT_RGB888:
+               return NWL_DSI_PIXEL_FORMAT_24;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * ps2bc - Picoseconds to byte clock cycles
+ *
+ * The byte clock runs at pixel_clock * bpp / (lanes * 8); mode.clock is
+ * in kHz, which together with the ps input gives the NSEC_PER_SEC
+ * divisor (kHz * ps = 1e3 * 1e-12 = 1/NSEC_PER_SEC).
+ */
+static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
+{
+       u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+       return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
+                                 dsi->lanes * 8 * NSEC_PER_SEC);
+}
+
+/*
+ * ui2bc - UI time periods to byte clock cycles
+ *
+ * One byte clock cycle spans exactly 8 UI (the byte clock is the HS bit
+ * clock divided by 8), so the conversion is a divide-by-8, rounded up.
+ * The previous lanes/pixel-clock arithmetic produced a value in seconds
+ * rather than cycles, which collapsed to 1 after the round-up.  The dsi
+ * parameter is kept so existing callers are unaffected.
+ */
+static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+{
+       return DIV64_U64_ROUND_UP(ui, 8);
+}
+
+/*
+ * us2lp - microseconds to LP (escape) clock cycles
+ *
+ * NOTE(review): us * lp_clk_rate is computed in unsigned long and could
+ * overflow on 32-bit for large wakeup times — confirm callers stay small.
+ */
+static u32 us2lp(u32 lp_clk_rate, unsigned long us)
+{
+       return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
+}
+
+/*
+ * Program the DSI-link side of the host: lane count, clock mode and the
+ * DPHY-derived timing registers (T_PRE/T_POST/TX_GAP/TWAKEUP).  Returns
+ * the accumulated register-access error, if any.
+ */
+static int nwl_dsi_config_host(struct nwl_dsi *dsi)
+{
+       u32 cycles;
+       struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
+
+       if (dsi->lanes < 1 || dsi->lanes > 4)
+               return -EINVAL;
+
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
+
+       /* EoTp auto-insertion is only enabled with a non-continuous clock. */
+       if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+               nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
+               nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
+       } else {
+               nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
+               nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
+       }
+
+       /* values in byte clock cycles */
+       cycles = ui2bc(dsi, cfg->clk_pre);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
+       /* T_POST covers lpx + clk_prepare + clk_zero plus clk_pre. */
+       cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
+       cycles += ui2bc(dsi, cfg->clk_pre);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
+       cycles = ps2bc(dsi, cfg->hs_exit);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
+
+       /* Disable the HS/LP-RX/BTA hardware timeouts. */
+       nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
+       /* In LP clock cycles */
+       cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * Program the DPI (pixel-input) side of the host from the current
+ * drm_display_mode: porches, sync widths, pixel format, video mode
+ * (burst vs. sync pulses/events) and FIFO thresholds.
+ */
+static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
+{
+       u32 mode;
+       int color_format;
+       bool burst_mode;
+       int hfront_porch, hback_porch, vfront_porch, vback_porch;
+       int hsync_len, vsync_len;
+
+       /* Derive blanking intervals from the standard mode timings. */
+       hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
+       hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
+       hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
+
+       vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
+       vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
+       vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
+
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
+
+       color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
+       if (color_format < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
+                             dsi->format);
+               return color_format;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
+
+       nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
+       nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
+       /*
+        * Adjusting input polarity based on the video mode results in
+        * a black screen so always pick active low:
+        */
+       nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+                     NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
+       nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+                     NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
+
+       /* Burst only when explicitly requested without sync pulses. */
+       burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+                    !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
+
+       if (burst_mode) {
+               nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
+               nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
+       } else {
+               mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
+                               NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
+                               NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
+               nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
+               nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
+                             dsi->mode.hdisplay);
+       }
+
+       nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
+       nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
+       nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
+
+       nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
+       nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
+       nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
+       nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
+
+       nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
+       nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
+       nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
+       nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * Mask all interrupts, then unmask only the ones the driver handles:
+ * TX-done, RX-header-received, TX FIFO overflow and HS TX timeout.
+ */
+static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
+{
+       u32 irq_enable;
+
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
+
+       /* A set mask bit disables the interrupt, hence the inversion. */
+       irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
+                           NWL_DSI_RX_PKT_HDR_RCVD_MASK |
+                           NWL_DSI_TX_FIFO_OVFLW_MASK |
+                           NWL_DSI_HS_TX_TIMEOUT_MASK);
+
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * mipi_dsi_host .attach: validate and record the peripheral's lane
+ * count, pixel format and mode flags for later host/DPI configuration.
+ */
+static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
+                              struct mipi_dsi_device *device)
+{
+       struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+       struct device *dev = dsi->dev;
+
+       DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
+                    device->format, device->mode_flags);
+
+       if (device->lanes < 1 || device->lanes > 4)
+               return -EINVAL;
+
+       dsi->lanes = device->lanes;
+       dsi->format = device->format;
+       dsi->dsi_mode_flags = device->mode_flags;
+
+       return 0;
+}
+
+/*
+ * Consume an incoming RX packet for the current transfer.  Called from
+ * IRQ context (via nwl_dsi_finish_transmission) possibly twice: once
+ * when the header arrives and again when the payload arrives.  Returns
+ * true when the transfer is finished (successfully or with an error
+ * recorded in xfer->status) and the waiter may be completed, false when
+ * more data is still expected.
+ */
+static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
+{
+       struct device *dev = dsi->dev;
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       int err;
+       u8 *payload = xfer->msg->rx_buf;
+       u32 val;
+       u16 word_count;
+       u8 channel;
+       u8 data_type;
+
+       xfer->status = 0;
+
+       /* rx_word_count == 0 means the header has not been parsed yet. */
+       if (xfer->rx_word_count == 0) {
+               if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
+                       return false;
+               /* Get the RX header and parse it */
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
+               err = nwl_dsi_clear_error(dsi);
+               if (err)
+                       xfer->status = err;
+               word_count = NWL_DSI_WC(val);
+               channel = NWL_DSI_RX_VC(val);
+               data_type = NWL_DSI_RX_DT(val);
+
+               if (channel != xfer->msg->channel) {
+                       DRM_DEV_ERROR(dev,
+                                     "[%02X] Channel mismatch (%u != %u)\n",
+                                     xfer->cmd, channel, xfer->msg->channel);
+                       xfer->status = -EINVAL;
+                       return true;
+               }
+
+               switch (data_type) {
+               /* Short read responses carry the data in the word count. */
+               case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+                       fallthrough;
+               case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+                       if (xfer->msg->rx_len > 1) {
+                               /* read second byte */
+                               payload[1] = word_count >> 8;
+                               ++xfer->rx_len;
+                       }
+                       fallthrough;
+               case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+                       fallthrough;
+               case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+                       if (xfer->msg->rx_len > 0) {
+                               /* read first byte */
+                               payload[0] = word_count & 0xff;
+                               ++xfer->rx_len;
+                       }
+                       xfer->status = xfer->rx_len;
+                       return true;
+               case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+                       word_count &= 0xff;
+                       DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
+                                     xfer->cmd, word_count);
+                       xfer->status = -EPROTO;
+                       return true;
+               }
+
+               if (word_count > xfer->msg->rx_len) {
+                       DRM_DEV_ERROR(dev,
+                               "[%02X] Receive buffer too small: %zu (< %u)\n",
+                               xfer->cmd, xfer->msg->rx_len, word_count);
+                       xfer->status = -EINVAL;
+                       return true;
+               }
+
+               xfer->rx_word_count = word_count;
+       } else {
+               /* Set word_count from previous header read */
+               word_count = xfer->rx_word_count;
+       }
+
+       /* If RX payload is not yet received, wait for it */
+       if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
+               return false;
+
+       /* Read the RX payload, 32 bits (4 payload bytes) per FIFO read. */
+       while (word_count >= 4) {
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+               payload[0] = (val >> 0) & 0xff;
+               payload[1] = (val >> 8) & 0xff;
+               payload[2] = (val >> 16) & 0xff;
+               payload[3] = (val >> 24) & 0xff;
+               payload += 4;
+               xfer->rx_len += 4;
+               word_count -= 4;
+       }
+
+       /* Unpack the 1-3 trailing bytes from the final FIFO word. */
+       if (word_count > 0) {
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+               switch (word_count) {
+               case 3:
+                       payload[2] = (val >> 16) & 0xff;
+                       ++xfer->rx_len;
+                       fallthrough;
+               case 2:
+                       payload[1] = (val >> 8) & 0xff;
+                       ++xfer->rx_len;
+                       fallthrough;
+               case 1:
+                       payload[0] = (val >> 0) & 0xff;
+                       ++xfer->rx_len;
+                       break;
+               }
+       }
+
+       xfer->status = xfer->rx_len;
+       err = nwl_dsi_clear_error(dsi);
+       if (err)
+               xfer->status = err;
+
+       return true;
+}
+
+/*
+ * IRQ-context completion handler for the in-flight transfer: record the
+ * TX result, or pull RX data, and wake the waiter in host_transfer()
+ * once the packet is fully handled.
+ */
+static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
+{
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       bool end_packet = false;
+
+       if (!xfer)
+               return;
+
+       if (xfer->direction == DSI_PACKET_SEND &&
+           status & NWL_DSI_TX_PKT_DONE) {
+               xfer->status = xfer->tx_len;
+               end_packet = true;
+       } else if (status & NWL_DSI_DPHY_DIRECTION &&
+                  ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
+                              NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
+               end_packet = nwl_dsi_read_packet(dsi, status);
+       }
+
+       if (end_packet)
+               complete(&xfer->completed);
+}
+
+/*
+ * Push the current transfer's payload into the TX FIFO, write the packet
+ * header/control word and kick off transmission.  Completion is signalled
+ * later from the IRQ handler.
+ */
+static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
+{
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       struct mipi_dsi_packet *pkt = &xfer->packet;
+       const u8 *payload;
+       size_t length;
+       u16 word_count;
+       u8 hs_mode;
+       u32 val;
+       u32 hs_workaround = 0;
+
+       /* Send the payload, if any */
+       length = pkt->payload_length;
+       payload = pkt->payload;
+
+       while (length >= 4) {
+               /*
+                * NOTE(review): unaligned/type-punned 32-bit load from the
+                * byte buffer — confirm payload alignment is guaranteed.
+                */
+               val = *(u32 *)payload;
+               /* Flag words whose bytes 1-2 are zero for errata E11418. */
+               hs_workaround |= !(val & 0xFFFF00);
+               nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+               payload += 4;
+               length -= 4;
+       }
+       /* Send the rest of the payload */
+       val = 0;
+       switch (length) {
+       case 3:
+               val |= payload[2] << 16;
+               fallthrough;
+       case 2:
+               val |= payload[1] << 8;
+               hs_workaround |= !(val & 0xFFFF00);
+               fallthrough;
+       case 1:
+               val |= payload[0];
+               nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+               break;
+       }
+       xfer->tx_len = pkt->payload_length;
+
+       /*
+        * Send the header
+        * header[0] = Virtual Channel + Data Type
+        * header[1] = Word Count LSB (LP) or first param (SP)
+        * header[2] = Word Count MSB (LP) or second param (SP)
+        */
+       word_count = pkt->header[1] | (pkt->header[2] << 8);
+       if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
+               DRM_DEV_DEBUG_DRIVER(dsi->dev,
+                                    "Using hs mode workaround for cmd 0x%x\n",
+                                    xfer->cmd);
+               hs_mode = 1;
+       } else {
+               hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
+       }
+       val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
+             NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
+             NWL_DSI_BTA_TX(xfer->need_bta);
+       nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
+
+       /* Send packet command */
+       nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
+}
+
+/*
+ * mipi_dsi_host .transfer: synchronously send one DSI message and, for
+ * read requests, wait for the response.  Returns the number of bytes
+ * sent/received or a negative errno.
+ *
+ * NOTE(review): xfer lives on this stack frame but dsi->xfer is never
+ * reset to NULL — neither on the clk_prepare_enable() failure path nor
+ * after completion — so the IRQ handler can later dereference a stale
+ * stack pointer; verify against the upstream driver.
+ */
+static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
+                                    const struct mipi_dsi_msg *msg)
+{
+       struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+       struct nwl_dsi_transfer xfer;
+       ssize_t ret = 0;
+
+       /* Create packet to be sent */
+       dsi->xfer = &xfer;
+       ret = mipi_dsi_create_packet(&xfer.packet, msg);
+       if (ret < 0) {
+               dsi->xfer = NULL;
+               return ret;
+       }
+
+       /* Reads need a response and therefore a bus turnaround (BTA). */
+       if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
+            msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
+            msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
+            msg->type & MIPI_DSI_DCS_READ) &&
+           msg->rx_len > 0 && msg->rx_buf)
+               xfer.direction = DSI_PACKET_RECEIVE;
+       else
+               xfer.direction = DSI_PACKET_SEND;
+
+       xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
+       xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
+       xfer.msg = msg;
+       xfer.status = -ETIMEDOUT;
+       xfer.rx_word_count = 0;
+       xfer.rx_len = 0;
+       /* First TX byte (the DCS/generic command) is used in log messages. */
+       xfer.cmd = 0x00;
+       if (msg->tx_len > 0)
+               xfer.cmd = ((u8 *)(msg->tx_buf))[0];
+       init_completion(&xfer.completed);
+
+       ret = clk_prepare_enable(dsi->rx_esc_clk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
+                             ret);
+               return ret;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
+                            clk_get_rate(dsi->rx_esc_clk));
+
+       /* Initiate the DSI packet transmision */
+       nwl_dsi_begin_transmission(dsi);
+
+       if (!wait_for_completion_timeout(&xfer.completed,
+                                        NWL_DSI_MIPI_FIFO_TIMEOUT)) {
+               DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
+                             xfer.cmd);
+               ret = -ETIMEDOUT;
+       } else {
+               ret = xfer.status;
+       }
+
+       clk_disable_unprepare(dsi->rx_esc_clk);
+
+       return ret;
+}
+
+/* DSI host callbacks; no .detach is provided. */
+static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
+       .attach = nwl_dsi_host_attach,
+       .transfer = nwl_dsi_host_transfer,
+};
+
+/*
+ * Interrupt handler: log FIFO overflow / HS timeout conditions and hand
+ * TX-done / RX events to nwl_dsi_finish_transmission().  Always returns
+ * IRQ_HANDLED.
+ */
+static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
+{
+       u32 irq_status;
+       struct nwl_dsi *dsi = data;
+
+       irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
+
+       if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
+               DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
+
+       if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
+               DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
+
+       if (irq_status & NWL_DSI_TX_PKT_DONE ||
+           irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
+           irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
+               nwl_dsi_finish_transmission(dsi, irq_status);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Bring up the DSI link: init and configure the DPHY, enable the tx_esc
+ * clock, program host and DPI registers, power the PHY on and enable
+ * interrupts.  Unwinds everything already acquired on any failure.
+ */
+static int nwl_dsi_enable(struct nwl_dsi *dsi)
+{
+       struct device *dev = dsi->dev;
+       union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
+       int ret;
+
+       /* Lane count is only set once a peripheral has attached. */
+       if (!dsi->lanes) {
+               DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
+               return -EINVAL;
+       }
+
+       ret = phy_init(dsi->phy);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
+               return ret;
+       }
+
+       ret = phy_configure(dsi->phy, phy_cfg);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
+               goto uninit_phy;
+       }
+
+       ret = clk_prepare_enable(dsi->tx_esc_clk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
+                             ret);
+               goto uninit_phy;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
+                            clk_get_rate(dsi->tx_esc_clk));
+
+       ret = nwl_dsi_config_host(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
+               goto disable_clock;
+       }
+
+       ret = nwl_dsi_config_dpi(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
+               goto disable_clock;
+       }
+
+       ret = phy_power_on(dsi->phy);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
+               goto disable_clock;
+       }
+
+       ret = nwl_dsi_init_interrupts(dsi);
+       if (ret < 0)
+               goto power_off_phy;
+
+       return ret;
+
+power_off_phy:
+       phy_power_off(dsi->phy);
+disable_clock:
+       clk_disable_unprepare(dsi->tx_esc_clk);
+uninit_phy:
+       phy_exit(dsi->phy);
+
+       return ret;
+}
+
+/* Reverse of nwl_dsi_enable(): power down the PHY, then its clock. */
+static int nwl_dsi_disable(struct nwl_dsi *dsi)
+{
+       struct device *dev = dsi->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
+
+       phy_power_off(dsi->phy);
+       phy_exit(dsi->phy);
+
+       /* Disabling the clock before the phy breaks enabling dsi again */
+       clk_disable_unprepare(dsi->tx_esc_clk);
+
+       return 0;
+}
+
+/*
+ * Bridge .disable: shut down the PHY and clocks, re-assert every reset
+ * line and drop the runtime PM reference taken in pre_enable.
+ */
+static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       nwl_dsi_disable(dsi);
+
+       ret = reset_control_assert(dsi->rst_dpi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_byte);
+       if (ret < 0) {
+               /* Message fixed: this is the BYTE reset (was logged as ESC). */
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_esc);
+       if (ret < 0) {
+               /* Message fixed: this is the ESC reset (was logged as BYTE). */
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_pclk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
+               return;
+       }
+
+       clk_disable_unprepare(dsi->core_clk);
+       clk_disable_unprepare(dsi->lcdif_clk);
+
+       pm_runtime_put(dsi->dev);
+}
+
+/*
+ * Compute the DPHY configuration for the given mode from the generic
+ * D-PHY defaults, then fill in the LP clock rate from tx_esc_clk.
+ */
+static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+                                  const struct drm_display_mode *mode,
+                                  union phy_configure_opts *phy_opts)
+{
+       unsigned long rate;
+       int ret;
+
+       if (dsi->lanes < 1 || dsi->lanes > 4)
+               return -EINVAL;
+
+       /*
+        * So far the DPHY spec minimal timings work for both mixel
+        * dphy and nwl dsi host
+        */
+       ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
+               mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
+               &phy_opts->mipi_dphy);
+       if (ret < 0)
+               return ret;
+
+       rate = clk_get_rate(dsi->tx_esc_clk);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
+       phy_opts->mipi_dphy.lp_clk_rate = rate;
+
+       return 0;
+}
+
+/*
+ * Bridge .mode_fixup: force active-high sync flags on the adjusted mode
+ * (the DPI input polarity is hardwired active-low in config_dpi).
+ */
+static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+                                     const struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode)
+{
+       /* At least LCDIF + NWL needs active high sync */
+       adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+       adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+       return true;
+}
+
+/*
+ * Bridge .mode_valid: reject modes whose bit rate per lane falls outside
+ * the controller's supported range (clock is in kHz, hence the scaled
+ * constants).
+ */
+static enum drm_mode_status
+nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+                         const struct drm_display_mode *mode)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+       if (mode->clock * bpp > 15000000 * dsi->lanes)
+               return MODE_CLOCK_HIGH;
+
+       if (mode->clock * bpp < 80000 * dsi->lanes)
+               return MODE_CLOCK_LOW;
+
+       return MODE_OK;
+}
+
+/*
+ * Bridge .mode_set: recompute the DPHY configuration for the adjusted
+ * mode and cache both (phy_cfg + mode) for the next enable.  Skips the
+ * update when the HS clock rate is unchanged.
+ */
+static void
+nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+                       const struct drm_display_mode *mode,
+                       const struct drm_display_mode *adjusted_mode)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       struct device *dev = dsi->dev;
+       union phy_configure_opts new_cfg;
+       unsigned long phy_ref_rate;
+       int ret;
+
+       ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
+       if (ret < 0)
+               return;
+
+       /*
+        * If hs clock is unchanged, we're all good - all parameters are
+        * derived from it atm.
+        */
+       if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
+               return;
+
+       phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
+       DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
+       /* Save the new desired phy config */
+       memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
+
+       memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+       drm_mode_debug_printmodeline(adjusted_mode);
+}
+
+/*
+ * Bridge .pre_enable: steps 1-3 of the NWL reset-out sequence (see the
+ * comment in struct nwl_dsi): enable clocks, release PCLK reset, bring
+ * up the link, then release ESC and BYTE resets.
+ *
+ * NOTE(review): the early returns leave the pm_runtime reference (and,
+ * for the later ones, the enabled clocks) unbalanced — verify whether
+ * error unwinding is intended here.
+ */
+static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       pm_runtime_get_sync(dsi->dev);
+
+       if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+               return;
+       if (clk_prepare_enable(dsi->core_clk) < 0)
+               return;
+
+       /* Step 1 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_pclk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+               return;
+       }
+
+       /* Step 2 from DSI reset-out instructions */
+       nwl_dsi_enable(dsi);
+
+       /* Step 3 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_esc);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+               return;
+       }
+       ret = reset_control_deassert(dsi->rst_byte);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+               return;
+       }
+}
+
+/*
+ * Bridge .enable: final step of the reset-out sequence — release the DPI
+ * reset so pixel data starts flowing (step 4 runs in the panel driver).
+ */
+static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       /* Step 5 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_dpi);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
+}
+
+/*
+ * Bridge .attach: locate the downstream panel or bridge on OF port 1,
+ * wrap a bare panel in a panel_bridge, and chain-attach it.  Connector
+ * creation is still done downstream, so NO_CONNECTOR is rejected.
+ */
+static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       struct drm_bridge *panel_bridge;
+       struct drm_panel *panel;
+       int ret;
+
+       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+               DRM_ERROR("Fix bridge driver to make connector optional!");
+               return -EINVAL;
+       }
+
+       ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
+                                         &panel_bridge);
+       if (ret)
+               return ret;
+
+       if (panel) {
+               panel_bridge = drm_panel_bridge_add(panel);
+               if (IS_ERR(panel_bridge))
+                       return PTR_ERR(panel_bridge);
+       }
+       dsi->panel_bridge = panel_bridge;
+
+       /* Nothing downstream yet: let the caller retry later. */
+       if (!dsi->panel_bridge)
+               return -EPROBE_DEFER;
+
+       return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+                                flags);
+}
+
+/* Bridge .detach: drop the panel_bridge created during attach. */
+static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+
+       drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
+}
+
+/* drm_bridge callbacks; post_disable is not needed for this controller. */
+static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+       .pre_enable = nwl_dsi_bridge_pre_enable,
+       .enable     = nwl_dsi_bridge_enable,
+       .disable    = nwl_dsi_bridge_disable,
+       .mode_fixup = nwl_dsi_bridge_mode_fixup,
+       .mode_set   = nwl_dsi_bridge_mode_set,
+       .mode_valid = nwl_dsi_bridge_mode_valid,
+       .attach     = nwl_dsi_bridge_attach,
+       .detach     = nwl_dsi_bridge_detach,
+};
+
+static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+{
+       struct platform_device *pdev = to_platform_device(dsi->dev);
+       struct clk *clk;
+       void __iomem *base;
+       int ret;
+
+       dsi->phy = devm_phy_get(dsi->dev, "dphy");
+       if (IS_ERR(dsi->phy)) {
+               ret = PTR_ERR(dsi->phy);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
+               return ret;
+       }
+
+       clk = devm_clk_get(dsi->dev, "lcdif");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->lcdif_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "core");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->core_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "phy_ref");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->phy_ref_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "rx_esc");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->rx_esc_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "tx_esc");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->tx_esc_clk = clk;
+
+       dsi->mux = devm_mux_control_get(dsi->dev, NULL);
+       if (IS_ERR(dsi->mux)) {
+               ret = PTR_ERR(dsi->mux);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
+               return ret;
+       }
+
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       dsi->regmap =
+               devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
+       if (IS_ERR(dsi->regmap)) {
+               ret = PTR_ERR(dsi->regmap);
+               DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
+                             ret);
+               return ret;
+       }
+
+       dsi->irq = platform_get_irq(pdev, 0);
+       if (dsi->irq < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
+                             dsi->irq);
+               return dsi->irq;
+       }
+
+       dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
+       if (IS_ERR(dsi->rst_pclk)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
+                             PTR_ERR(dsi->rst_pclk));
+               return PTR_ERR(dsi->rst_pclk);
+       }
+       dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
+       if (IS_ERR(dsi->rst_byte)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
+                             PTR_ERR(dsi->rst_byte));
+               return PTR_ERR(dsi->rst_byte);
+       }
+       dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
+       if (IS_ERR(dsi->rst_esc)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
+                             PTR_ERR(dsi->rst_esc));
+               return PTR_ERR(dsi->rst_esc);
+       }
+       dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
+       if (IS_ERR(dsi->rst_dpi)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
+                             PTR_ERR(dsi->rst_dpi));
+               return PTR_ERR(dsi->rst_dpi);
+       }
+       return 0;
+}
+
+static int nwl_dsi_select_input(struct nwl_dsi *dsi)
+{
+       struct device_node *remote;
+       u32 use_dcss = 1;
+       int ret;
+
+       remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+                                         NWL_DSI_ENDPOINT_LCDIF);
+       if (remote) {
+               use_dcss = 0;
+       } else {
+               remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+                                                 NWL_DSI_ENDPOINT_DCSS);
+               if (!remote) {
+                       DRM_DEV_ERROR(dsi->dev,
+                                     "No valid input endpoint found\n");
+                       return -EINVAL;
+               }
+       }
+
+       DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
+                    (use_dcss) ? "DCSS" : "LCDIF");
+       ret = mux_control_try_select(dsi->mux, use_dcss);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
+
+       of_node_put(remote);
+       return ret;
+}
+
+static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
+{
+       int ret;
+
+       ret = mux_control_deselect(dsi->mux);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
+
+       return ret;
+}
+
+static const struct drm_bridge_timings nwl_dsi_timings = {
+       .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+static const struct of_device_id nwl_dsi_dt_ids[] = {
+       { .compatible = "fsl,imx8mq-nwl-dsi", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
+
+static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
+       { .soc_id = "i.MX8MQ", .revision = "2.0",
+         .data = (void *)E11418_HS_MODE_QUIRK },
+       { /* sentinel. */ },
+};
+
+static int nwl_dsi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       const struct soc_device_attribute *attr;
+       struct nwl_dsi *dsi;
+       int ret;
+
+       dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       dsi->dev = dev;
+
+       ret = nwl_dsi_parse_dt(dsi);
+       if (ret)
+               return ret;
+
+       ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
+                              dev_name(dev), dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
+                             ret);
+               return ret;
+       }
+
+       dsi->dsi_host.ops = &nwl_dsi_host_ops;
+       dsi->dsi_host.dev = dev;
+       ret = mipi_dsi_host_register(&dsi->dsi_host);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
+               return ret;
+       }
+
+       attr = soc_device_match(nwl_dsi_quirks_match);
+       if (attr)
+               dsi->quirks = (uintptr_t)attr->data;
+
+       dsi->bridge.driver_private = dsi;
+       dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
+       dsi->bridge.of_node = dev->of_node;
+       dsi->bridge.timings = &nwl_dsi_timings;
+
+       dev_set_drvdata(dev, dsi);
+       pm_runtime_enable(dev);
+
+       ret = nwl_dsi_select_input(dsi);
+       if (ret < 0) {
+               mipi_dsi_host_unregister(&dsi->dsi_host);
+               return ret;
+       }
+
+       drm_bridge_add(&dsi->bridge);
+       return 0;
+}
+
+static int nwl_dsi_remove(struct platform_device *pdev)
+{
+       struct nwl_dsi *dsi = platform_get_drvdata(pdev);
+
+       nwl_dsi_deselect_input(dsi);
+       mipi_dsi_host_unregister(&dsi->dsi_host);
+       drm_bridge_remove(&dsi->bridge);
+       pm_runtime_disable(&pdev->dev);
+       return 0;
+}
+
+static struct platform_driver nwl_dsi_driver = {
+       .probe          = nwl_dsi_probe,
+       .remove         = nwl_dsi_remove,
+       .driver         = {
+               .of_match_table = nwl_dsi_dt_ids,
+               .name   = DRV_NAME,
+       },
+};
+
+module_platform_driver(nwl_dsi_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Purism SPC");
+MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
+MODULE_LICENSE("GPL"); /* GPLv2 or later */
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
new file mode 100644 (file)
index 0000000..a247a8a
--- /dev/null
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2019 Purism SPC
+ */
+#ifndef __NWL_DSI_H__
+#define __NWL_DSI_H__
+
+/* DSI HOST registers */
+#define NWL_DSI_CFG_NUM_LANES                  0x0
+#define NWL_DSI_CFG_NONCONTINUOUS_CLK          0x4
+#define NWL_DSI_CFG_T_PRE                      0x8
+#define NWL_DSI_CFG_T_POST                     0xc
+#define NWL_DSI_CFG_TX_GAP                     0x10
+#define NWL_DSI_CFG_AUTOINSERT_EOTP            0x14
+#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP      0x18
+#define NWL_DSI_CFG_HTX_TO_COUNT               0x1c
+#define NWL_DSI_CFG_LRX_H_TO_COUNT             0x20
+#define NWL_DSI_CFG_BTA_H_TO_COUNT             0x24
+#define NWL_DSI_CFG_TWAKEUP                    0x28
+#define NWL_DSI_CFG_STATUS_OUT                 0x2c
+#define NWL_DSI_RX_ERROR_STATUS                        0x30
+
+/* DSI DPI registers */
+#define NWL_DSI_PIXEL_PAYLOAD_SIZE             0x200
+#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL          0x204
+#define NWL_DSI_INTERFACE_COLOR_CODING         0x208
+#define NWL_DSI_PIXEL_FORMAT                   0x20c
+#define NWL_DSI_VSYNC_POLARITY                 0x210
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW      0
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH     BIT(1)
+
+#define NWL_DSI_HSYNC_POLARITY                 0x214
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW      0
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH     BIT(1)
+
+#define NWL_DSI_VIDEO_MODE                     0x218
+#define NWL_DSI_HFP                            0x21c
+#define NWL_DSI_HBP                            0x220
+#define NWL_DSI_HSA                            0x224
+#define NWL_DSI_ENABLE_MULT_PKTS               0x228
+#define NWL_DSI_VBP                            0x22c
+#define NWL_DSI_VFP                            0x230
+#define NWL_DSI_BLLP_MODE                      0x234
+#define NWL_DSI_USE_NULL_PKT_BLLP              0x238
+#define NWL_DSI_VACTIVE                                0x23c
+#define NWL_DSI_VC                             0x240
+
+/* DSI APB PKT control */
+#define NWL_DSI_TX_PAYLOAD                     0x280
+#define NWL_DSI_PKT_CONTROL                    0x284
+#define NWL_DSI_SEND_PACKET                    0x288
+#define NWL_DSI_PKT_STATUS                     0x28c
+#define NWL_DSI_PKT_FIFO_WR_LEVEL              0x290
+#define NWL_DSI_PKT_FIFO_RD_LEVEL              0x294
+#define NWL_DSI_RX_PAYLOAD                     0x298
+#define NWL_DSI_RX_PKT_HEADER                  0x29c
+
+/* DSI IRQ handling */
+#define NWL_DSI_IRQ_STATUS                     0x2a0
+#define NWL_DSI_SM_NOT_IDLE                    BIT(0)
+#define NWL_DSI_TX_PKT_DONE                    BIT(1)
+#define NWL_DSI_DPHY_DIRECTION                 BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW                  BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW                  BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW                  BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW                  BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD                        BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD       BIT(8)
+#define NWL_DSI_BTA_TIMEOUT                    BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT                  BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT                  BIT(31)
+
+#define NWL_DSI_IRQ_STATUS2                    0x2a4
+#define NWL_DSI_SINGLE_BIT_ECC_ERR             BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR              BIT(1)
+#define NWL_DSI_CRC_ERR                                BIT(2)
+
+#define NWL_DSI_IRQ_MASK                       0x2a8
+#define NWL_DSI_SM_NOT_IDLE_MASK               BIT(0)
+#define NWL_DSI_TX_PKT_DONE_MASK               BIT(1)
+#define NWL_DSI_DPHY_DIRECTION_MASK            BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW_MASK             BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW_MASK             BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW_MASK             BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW_MASK             BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD_MASK           BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK  BIT(8)
+#define NWL_DSI_BTA_TIMEOUT_MASK               BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT_MASK             BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT_MASK             BIT(31)
+
+#define NWL_DSI_IRQ_MASK2                      0x2ac
+#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK                BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK         BIT(1)
+#define NWL_DSI_CRC_ERR_MASK                   BIT(2)
+
+/*
+ * PKT_CONTROL format:
+ * [15: 0] - word count
+ * [17:16] - virtual channel
+ * [23:18] - data type
+ * [24]           - LP or HS select (0 - LP, 1 - HS)
+ * [25]           - perform BTA after packet is sent
+ * [26]           - perform BTA only, no packet tx
+ */
+#define NWL_DSI_WC(x)          FIELD_PREP(GENMASK(15, 0), (x))
+#define NWL_DSI_TX_VC(x)       FIELD_PREP(GENMASK(17, 16), (x))
+#define NWL_DSI_TX_DT(x)       FIELD_PREP(GENMASK(23, 18), (x))
+#define NWL_DSI_HS_SEL(x)      FIELD_PREP(GENMASK(24, 24), (x))
+#define NWL_DSI_BTA_TX(x)      FIELD_PREP(GENMASK(25, 25), (x))
+#define NWL_DSI_BTA_NO_TX(x)   FIELD_PREP(GENMASK(26, 26), (x))
+
+/*
+ * RX_PKT_HEADER format:
+ * [15: 0] - word count
+ * [21:16] - data type
+ * [23:22] - virtual channel
+ */
+#define NWL_DSI_RX_DT(x)       FIELD_GET(GENMASK(21, 16), (x))
+#define NWL_DSI_RX_VC(x)       FIELD_GET(GENMASK(23, 22), (x))
+
+/* DSI Video mode */
+#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES         0
+#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS     BIT(0)
+#define NWL_DSI_VM_BURST_MODE                          BIT(1)
+
+/* * DPI color coding */
+#define NWL_DSI_DPI_16_BIT_565_PACKED  0
+#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
+#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
+#define NWL_DSI_DPI_18_BIT_PACKED      3
+#define NWL_DSI_DPI_18_BIT_ALIGNED     4
+#define NWL_DSI_DPI_24_BIT             5
+
+/* * DPI Pixel format */
+#define NWL_DSI_PIXEL_FORMAT_16  0
+#define NWL_DSI_PIXEL_FORMAT_18  BIT(0)
+#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
+#define NWL_DSI_PIXEL_FORMAT_24  (BIT(0) | BIT(1))
+
+#endif /* __NWL_DSI_H__ */
index 8461ee8304ba75bb160b9cda422be3a9f2ad6a82..1e63ed6b18aa4f9b6013382696bb8b08f0ef5e9a 100644 (file)
@@ -166,7 +166,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
  *
  * The connector type is set to @panel->connector_type, which must be set to a
  * known type. Calling this function with a panel whose connector type is
- * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL).
  *
  * See devm_drm_panel_bridge_add() for an automatically managed version of this
  * function.
@@ -174,7 +174,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
 {
        if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        return drm_panel_bridge_add_typed(panel, panel->connector_type);
 }
@@ -265,7 +265,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
                                             struct drm_panel *panel)
 {
        if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        return devm_drm_panel_bridge_add_typed(dev, panel,
                                               panel->connector_type);
@@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
 
 /**
  * drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: The drm_bridge.
  *
  * drm_panel_bridge creates the connector.
  * This function gives external access to the connector.
index d3a53442d449a4a53ddc0f588bc57bd53c2146b8..4b099196afeba12815bf41cc00372cbd4e8fd297 100644 (file)
@@ -268,8 +268,6 @@ static int ps8640_probe(struct i2c_client *client)
        if (!panel)
                return -ENODEV;
 
-       panel->connector_type = DRM_MODE_CONNECTOR_eDP;
-
        ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
        if (IS_ERR(ps_bridge->panel_bridge))
                return PTR_ERR(ps_bridge->panel_bridge);
index f81f81b7051f6df165de89c5df8c5583bc14982d..b1258f0ed20551b85072138bf0ad3c9cb56fd3bc 100644 (file)
@@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx,
        ctx->supplies[3].supply = "cvcc12";
        ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
        if (ret) {
-               dev_err(ctx->dev, "regulator_bulk failed\n");
+               if (ret != -EPROBE_DEFER)
+                       dev_err(ctx->dev, "regulator_bulk failed\n");
                return ret;
        }
 
index 383b1073d7de43cf1afe41d8d611b79ea5be3591..30681398cfb0576af7698219aa0ec7693f72ad2f 100644 (file)
@@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
        { 0x6756, 0x78ab, 0x2000, 0x0200 }
 };
 
+static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
+       { 0x1b7c, 0x0000, 0x0000, 0x0020 },
+       { 0x0000, 0x1b7c, 0x0000, 0x0020 },
+       { 0x0000, 0x0000, 0x1b7c, 0x0020 }
+};
+
 struct hdmi_vmode {
        bool mdataenablepolarity;
 
@@ -109,6 +115,7 @@ struct hdmi_data_info {
        unsigned int pix_repet_factor;
        unsigned int hdcp_enable;
        struct hdmi_vmode video_mode;
+       bool rgb_limited_range;
 };
 
 struct dw_hdmi_i2c {
@@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
 
 static int is_color_space_conversion(struct dw_hdmi *hdmi)
 {
-       return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
+       struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+       bool is_input_rgb, is_output_rgb;
+
+       is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format);
+       is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format);
+
+       return (is_input_rgb != is_output_rgb) ||
+              (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range);
 }
 
 static int is_color_space_decimation(struct dw_hdmi *hdmi)
@@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi)
        return 0;
 }
 
+static bool is_csc_needed(struct dw_hdmi *hdmi)
+{
+       return is_color_space_conversion(hdmi) ||
+              is_color_space_decimation(hdmi) ||
+              is_color_space_interpolation(hdmi);
+}
+
 static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
 {
        const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+       bool is_input_rgb, is_output_rgb;
        unsigned i;
        u32 csc_scale = 1;
 
-       if (is_color_space_conversion(hdmi)) {
-               if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
-                       if (hdmi->hdmi_data.enc_out_encoding ==
-                                               V4L2_YCBCR_ENC_601)
-                               csc_coeff = &csc_coeff_rgb_out_eitu601;
-                       else
-                               csc_coeff = &csc_coeff_rgb_out_eitu709;
-               } else if (hdmi_bus_fmt_is_rgb(
-                                       hdmi->hdmi_data.enc_in_bus_format)) {
-                       if (hdmi->hdmi_data.enc_out_encoding ==
-                                               V4L2_YCBCR_ENC_601)
-                               csc_coeff = &csc_coeff_rgb_in_eitu601;
-                       else
-                               csc_coeff = &csc_coeff_rgb_in_eitu709;
-                       csc_scale = 0;
-               }
+       is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format);
+       is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format);
+
+       if (!is_input_rgb && is_output_rgb) {
+               if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+                       csc_coeff = &csc_coeff_rgb_out_eitu601;
+               else
+                       csc_coeff = &csc_coeff_rgb_out_eitu709;
+       } else if (is_input_rgb && !is_output_rgb) {
+               if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+                       csc_coeff = &csc_coeff_rgb_in_eitu601;
+               else
+                       csc_coeff = &csc_coeff_rgb_in_eitu709;
+               csc_scale = 0;
+       } else if (is_input_rgb && is_output_rgb &&
+                  hdmi->hdmi_data.rgb_limited_range) {
+               csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
        }
 
        /* The CSC registers are sequential, alternating MSB then LSB */
@@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
        drm_hdmi_avi_infoframe_from_display_mode(&frame,
                                                 &hdmi->connector, mode);
 
+       if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+               drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector,
+                                                  mode,
+                                                  hdmi->hdmi_data.rgb_limited_range ?
+                                                  HDMI_QUANTIZATION_RANGE_LIMITED :
+                                                  HDMI_QUANTIZATION_RANGE_FULL);
+       } else {
+               frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+               frame.ycc_quantization_range =
+                       HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+       }
+
        if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
                frame.colorspace = HDMI_COLORSPACE_YUV444;
        else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
@@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
        }
 
-       frame.scan_mode = HDMI_SCAN_MODE_NONE;
-
        /*
         * The Designware IP uses a different byte format from standard
         * AVI info frames, though generally the bits are in the correct
@@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
        hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
 
        /* Enable csc path */
-       if (is_color_space_conversion(hdmi)) {
+       if (is_csc_needed(hdmi)) {
                hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
                hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
-       }
 
-       /* Enable color space conversion if needed */
-       if (is_color_space_conversion(hdmi))
                hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH,
                            HDMI_MC_FLOWCTRL);
-       else
+       } else {
+               hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+               hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+
                hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS,
                            HDMI_MC_FLOWCTRL);
+       }
 }
 
 /* Workaround to clear the overflow condition */
@@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
        if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
                hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
+       hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
+               drm_default_rgb_quant_range(mode) ==
+               HDMI_QUANTIZATION_RANGE_LIMITED;
+
        hdmi->hdmi_data.pix_repet_factor = 0;
        hdmi->hdmi_data.hdcp_enable = 0;
        hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
index 1b39e8d37834a2e095944a65623ff3833834a26c..6650fe4cfc20f79585eb1591f3437425dc033d5a 100644 (file)
@@ -178,6 +178,8 @@ static int tc358768_clear_error(struct tc358768_priv *priv)
 
 static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
 {
+       /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+       int tmpval = val;
        size_t count = 2;
 
        if (priv->error)
@@ -187,7 +189,7 @@ static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
        if (reg < 0x100 || reg >= 0x600)
                count = 1;
 
-       priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+       priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count);
 }
 
 static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
deleted file mode 100644 (file)
index c6bbd98..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_CIRRUS_QEMU
-       tristate "Cirrus driver for QEMU emulated device"
-       depends on DRM && PCI && MMU
-       select DRM_KMS_HELPER
-       select DRM_GEM_SHMEM_HELPER
-       help
-        This is a KMS driver for emulated cirrus device in qemu.
-        It is *NOT* intended for real cirrus devices. This requires
-        the modesetting userspace X.org driver.
-
-        Cirrus is obsolete, the hardware was designed in the 90ies
-        and can't keep up with todays needs.  More background:
-        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-
-        Better alternatives are:
-          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
-          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
-          - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
deleted file mode 100644 (file)
index 0c1ed3f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
deleted file mode 100644 (file)
index d2ff63c..0000000
+++ /dev/null
@@ -1,680 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2012-2019 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- *         Dave Airlie
- *         Gerd Hoffmann
- *
- * Portions of this code derived from cirrusfb.c:
- * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
- *
- * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
- */
-
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <video/cirrus.h>
-#include <video/vga.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_connector.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_file.h>
-#include <drm/drm_format_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_shmem_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu cirrus vga"
-#define DRIVER_DATE "2019"
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 0
-
-#define CIRRUS_MAX_PITCH (0x1FF << 3)      /* (4096 - 1) & ~111b bytes */
-#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
-
-struct cirrus_device {
-       struct drm_device              dev;
-       struct drm_simple_display_pipe pipe;
-       struct drm_connector           conn;
-       unsigned int                   cpp;
-       unsigned int                   pitch;
-       void __iomem                   *vram;
-       void __iomem                   *mmio;
-};
-
-/* ------------------------------------------------------------------ */
-/*
- * The meat of this driver. The core passes us a mode and we have to program
- * it. The modesetting here is the bare minimum required to satisfy the qemu
- * emulation of this hardware, and running this against a real device is
- * likely to result in an inadequately programmed mode. We've already had
- * the opportunity to modify the mode, so whatever we receive here should
- * be something that can be correctly programmed and displayed
- */
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
-{
-       iowrite8(reg, cirrus->mmio + SEQ_INDEX);
-       return ioread8(cirrus->mmio + SEQ_DATA);
-}
-
-static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
-{
-       iowrite8(reg, cirrus->mmio + SEQ_INDEX);
-       iowrite8(val, cirrus->mmio + SEQ_DATA);
-}
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
-{
-       iowrite8(reg, cirrus->mmio + CRT_INDEX);
-       return ioread8(cirrus->mmio + CRT_DATA);
-}
-
-static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
-{
-       iowrite8(reg, cirrus->mmio + CRT_INDEX);
-       iowrite8(val, cirrus->mmio + CRT_DATA);
-}
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
-{
-       iowrite8(reg, cirrus->mmio + GFX_INDEX);
-       iowrite8(val, cirrus->mmio + GFX_DATA);
-}
-
-#define VGA_DAC_MASK  0x06
-
-static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
-{
-       ioread8(cirrus->mmio + VGA_DAC_MASK);
-       ioread8(cirrus->mmio + VGA_DAC_MASK);
-       ioread8(cirrus->mmio + VGA_DAC_MASK);
-       ioread8(cirrus->mmio + VGA_DAC_MASK);
-       iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
-}
-
-static int cirrus_convert_to(struct drm_framebuffer *fb)
-{
-       if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
-               if (fb->width * 3 <= CIRRUS_MAX_PITCH)
-                       /* convert from XR24 to RG24 */
-                       return 3;
-               else
-                       /* convert from XR24 to RG16 */
-                       return 2;
-       }
-       return 0;
-}
-
-static int cirrus_cpp(struct drm_framebuffer *fb)
-{
-       int convert_cpp = cirrus_convert_to(fb);
-
-       if (convert_cpp)
-               return convert_cpp;
-       return fb->format->cpp[0];
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
-       int convert_cpp = cirrus_convert_to(fb);
-
-       if (convert_cpp)
-               return convert_cpp * fb->width;
-       return fb->pitches[0];
-}
-
-static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
-{
-       int idx;
-       u32 addr;
-       u8 tmp;
-
-       if (!drm_dev_enter(&cirrus->dev, &idx))
-               return;
-
-       addr = offset >> 2;
-       wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
-       wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
-
-       tmp = rreg_crt(cirrus, 0x1b);
-       tmp &= 0xf2;
-       tmp |= (addr >> 16) & 0x01;
-       tmp |= (addr >> 15) & 0x0c;
-       wreg_crt(cirrus, 0x1b, tmp);
-
-       tmp = rreg_crt(cirrus, 0x1d);
-       tmp &= 0x7f;
-       tmp |= (addr >> 12) & 0x80;
-       wreg_crt(cirrus, 0x1d, tmp);
-
-       drm_dev_exit(idx);
-}
-
-static int cirrus_mode_set(struct cirrus_device *cirrus,
-                          struct drm_display_mode *mode,
-                          struct drm_framebuffer *fb)
-{
-       int hsyncstart, hsyncend, htotal, hdispend;
-       int vtotal, vdispend;
-       int tmp, idx;
-       int sr07 = 0, hdr = 0;
-
-       if (!drm_dev_enter(&cirrus->dev, &idx))
-               return -1;
-
-       htotal = mode->htotal / 8;
-       hsyncend = mode->hsync_end / 8;
-       hsyncstart = mode->hsync_start / 8;
-       hdispend = mode->hdisplay / 8;
-
-       vtotal = mode->vtotal;
-       vdispend = mode->vdisplay;
-
-       vdispend -= 1;
-       vtotal -= 2;
-
-       htotal -= 5;
-       hdispend -= 1;
-       hsyncstart += 1;
-       hsyncend += 1;
-
-       wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
-       wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
-       wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
-       wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
-       wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
-       wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
-       wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
-
-       tmp = 0x40;
-       if ((vdispend + 1) & 512)
-               tmp |= 0x20;
-       wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
-
-       /*
-        * Overflow bits for values that don't fit in the standard registers
-        */
-       tmp = 0x10;
-       if (vtotal & 0x100)
-               tmp |= 0x01;
-       if (vdispend & 0x100)
-               tmp |= 0x02;
-       if ((vdispend + 1) & 0x100)
-               tmp |= 0x08;
-       if (vtotal & 0x200)
-               tmp |= 0x20;
-       if (vdispend & 0x200)
-               tmp |= 0x40;
-       wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
-
-       tmp = 0;
-
-       /* More overflow bits */
-
-       if ((htotal + 5) & 0x40)
-               tmp |= 0x10;
-       if ((htotal + 5) & 0x80)
-               tmp |= 0x20;
-       if (vtotal & 0x100)
-               tmp |= 0x40;
-       if (vtotal & 0x200)
-               tmp |= 0x80;
-
-       wreg_crt(cirrus, CL_CRT1A, tmp);
-
-       /* Disable Hercules/CGA compatibility */
-       wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
-
-       sr07 = rreg_seq(cirrus, 0x07);
-       sr07 &= 0xe0;
-       hdr = 0;
-
-       cirrus->cpp = cirrus_cpp(fb);
-       switch (cirrus->cpp * 8) {
-       case 8:
-               sr07 |= 0x11;
-               break;
-       case 16:
-               sr07 |= 0x17;
-               hdr = 0xc1;
-               break;
-       case 24:
-               sr07 |= 0x15;
-               hdr = 0xc5;
-               break;
-       case 32:
-               sr07 |= 0x19;
-               hdr = 0xc5;
-               break;
-       default:
-               drm_dev_exit(idx);
-               return -1;
-       }
-
-       wreg_seq(cirrus, 0x7, sr07);
-
-       /* Program the pitch */
-       cirrus->pitch = cirrus_pitch(fb);
-       tmp = cirrus->pitch / 8;
-       wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
-
-       /* Enable extended blanking and pitch bits, and enable full memory */
-       tmp = 0x22;
-       tmp |= (cirrus->pitch >> 7) & 0x10;
-       tmp |= (cirrus->pitch >> 6) & 0x40;
-       wreg_crt(cirrus, 0x1b, tmp);
-
-       /* Enable high-colour modes */
-       wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
-
-       /* And set graphics mode */
-       wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
-
-       wreg_hdr(cirrus, hdr);
-
-       cirrus_set_start_address(cirrus, 0);
-
-       /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
-       outb(0x20, 0x3c0);
-
-       drm_dev_exit(idx);
-       return 0;
-}
-
-static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
-                              struct drm_rect *rect)
-{
-       struct cirrus_device *cirrus = fb->dev->dev_private;
-       void *vmap;
-       int idx, ret;
-
-       ret = -ENODEV;
-       if (!drm_dev_enter(&cirrus->dev, &idx))
-               goto out;
-
-       ret = -ENOMEM;
-       vmap = drm_gem_shmem_vmap(fb->obj[0]);
-       if (!vmap)
-               goto out_dev_exit;
-
-       if (cirrus->cpp == fb->format->cpp[0])
-               drm_fb_memcpy_dstclip(cirrus->vram,
-                                     vmap, fb, rect);
-
-       else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
-               drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
-                                                 cirrus->pitch,
-                                                 vmap, fb, rect, false);
-
-       else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
-               drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
-                                                 cirrus->pitch,
-                                                 vmap, fb, rect);
-
-       else
-               WARN_ON_ONCE("cpp mismatch");
-
-       drm_gem_shmem_vunmap(fb->obj[0], vmap);
-       ret = 0;
-
-out_dev_exit:
-       drm_dev_exit(idx);
-out:
-       return ret;
-}
-
-static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
-{
-       struct drm_rect fullscreen = {
-               .x1 = 0,
-               .x2 = fb->width,
-               .y1 = 0,
-               .y2 = fb->height,
-       };
-       return cirrus_fb_blit_rect(fb, &fullscreen);
-}
-
-static int cirrus_check_size(int width, int height,
-                            struct drm_framebuffer *fb)
-{
-       int pitch = width * 2;
-
-       if (fb)
-               pitch = cirrus_pitch(fb);
-
-       if (pitch > CIRRUS_MAX_PITCH)
-               return -EINVAL;
-       if (pitch * height > CIRRUS_VRAM_SIZE)
-               return -EINVAL;
-       return 0;
-}
-
-/* ------------------------------------------------------------------ */
-/* cirrus connector                                                  */
-
-static int cirrus_conn_get_modes(struct drm_connector *conn)
-{
-       int count;
-
-       count = drm_add_modes_noedid(conn,
-                                    conn->dev->mode_config.max_width,
-                                    conn->dev->mode_config.max_height);
-       drm_set_preferred_mode(conn, 1024, 768);
-       return count;
-}
-
-static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
-       .get_modes = cirrus_conn_get_modes,
-};
-
-static const struct drm_connector_funcs cirrus_conn_funcs = {
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = drm_connector_cleanup,
-       .reset = drm_atomic_helper_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int cirrus_conn_init(struct cirrus_device *cirrus)
-{
-       drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
-       return drm_connector_init(&cirrus->dev, &cirrus->conn,
-                                 &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
-
-}
-
-/* ------------------------------------------------------------------ */
-/* cirrus (simple) display pipe                                              */
-
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
-                                                  const struct drm_display_mode *mode)
-{
-       if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
-               return MODE_BAD;
-       return MODE_OK;
-}
-
-static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
-                            struct drm_plane_state *plane_state,
-                            struct drm_crtc_state *crtc_state)
-{
-       struct drm_framebuffer *fb = plane_state->fb;
-
-       if (!fb)
-               return 0;
-       return cirrus_check_size(fb->width, fb->height, fb);
-}
-
-static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
-                              struct drm_crtc_state *crtc_state,
-                              struct drm_plane_state *plane_state)
-{
-       struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
-
-       cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
-       cirrus_fb_blit_fullscreen(plane_state->fb);
-}
-
-static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
-                              struct drm_plane_state *old_state)
-{
-       struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
-       struct drm_plane_state *state = pipe->plane.state;
-       struct drm_crtc *crtc = &pipe->crtc;
-       struct drm_rect rect;
-
-       if (pipe->plane.state->fb &&
-           cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
-               cirrus_mode_set(cirrus, &crtc->mode,
-                               pipe->plane.state->fb);
-
-       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
-               cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-}
-
-static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
-       .mode_valid = cirrus_pipe_mode_valid,
-       .check      = cirrus_pipe_check,
-       .enable     = cirrus_pipe_enable,
-       .update     = cirrus_pipe_update,
-};
-
-static const uint32_t cirrus_formats[] = {
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_RGB888,
-       DRM_FORMAT_XRGB8888,
-};
-
-static const uint64_t cirrus_modifiers[] = {
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static int cirrus_pipe_init(struct cirrus_device *cirrus)
-{
-       return drm_simple_display_pipe_init(&cirrus->dev,
-                                           &cirrus->pipe,
-                                           &cirrus_pipe_funcs,
-                                           cirrus_formats,
-                                           ARRAY_SIZE(cirrus_formats),
-                                           cirrus_modifiers,
-                                           &cirrus->conn);
-}
-
-/* ------------------------------------------------------------------ */
-/* cirrus framebuffers & mode config                                 */
-
-static struct drm_framebuffer*
-cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
-                const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
-           mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
-           mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
-               return ERR_PTR(-EINVAL);
-       if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
-               return ERR_PTR(-EINVAL);
-       return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
-}
-
-static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
-       .fb_create = cirrus_fb_create,
-       .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = drm_atomic_helper_commit,
-};
-
-static void cirrus_mode_config_init(struct cirrus_device *cirrus)
-{
-       struct drm_device *dev = &cirrus->dev;
-
-       drm_mode_config_init(dev);
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-       dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
-       dev->mode_config.max_height = 1024;
-       dev->mode_config.preferred_depth = 16;
-       dev->mode_config.prefer_shadow = 0;
-       dev->mode_config.funcs = &cirrus_mode_config_funcs;
-}
-
-/* ------------------------------------------------------------------ */
-
-static void cirrus_release(struct drm_device *dev)
-{
-       struct cirrus_device *cirrus = dev->dev_private;
-
-       drm_mode_config_cleanup(dev);
-       kfree(cirrus);
-}
-
-DEFINE_DRM_GEM_FOPS(cirrus_fops);
-
-static struct drm_driver cirrus_driver = {
-       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
-
-       .name            = DRIVER_NAME,
-       .desc            = DRIVER_DESC,
-       .date            = DRIVER_DATE,
-       .major           = DRIVER_MAJOR,
-       .minor           = DRIVER_MINOR,
-
-       .fops            = &cirrus_fops,
-       DRM_GEM_SHMEM_DRIVER_OPS,
-       .release         = cirrus_release,
-};
-
-static int cirrus_pci_probe(struct pci_dev *pdev,
-                           const struct pci_device_id *ent)
-{
-       struct drm_device *dev;
-       struct cirrus_device *cirrus;
-       int ret;
-
-       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
-       if (ret)
-               return ret;
-
-       ret = pci_enable_device(pdev);
-       if (ret)
-               return ret;
-
-       ret = pci_request_regions(pdev, DRIVER_NAME);
-       if (ret)
-               return ret;
-
-       ret = -ENOMEM;
-       cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
-       if (cirrus == NULL)
-               goto err_pci_release;
-
-       dev = &cirrus->dev;
-       ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
-       if (ret)
-               goto err_free_cirrus;
-       dev->dev_private = cirrus;
-
-       ret = -ENOMEM;
-       cirrus->vram = ioremap(pci_resource_start(pdev, 0),
-                              pci_resource_len(pdev, 0));
-       if (cirrus->vram == NULL)
-               goto err_dev_put;
-
-       cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
-                              pci_resource_len(pdev, 1));
-       if (cirrus->mmio == NULL)
-               goto err_unmap_vram;
-
-       cirrus_mode_config_init(cirrus);
-
-       ret = cirrus_conn_init(cirrus);
-       if (ret < 0)
-               goto err_cleanup;
-
-       ret = cirrus_pipe_init(cirrus);
-       if (ret < 0)
-               goto err_cleanup;
-
-       drm_mode_config_reset(dev);
-
-       dev->pdev = pdev;
-       pci_set_drvdata(pdev, dev);
-       ret = drm_dev_register(dev, 0);
-       if (ret)
-               goto err_cleanup;
-
-       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-       return 0;
-
-err_cleanup:
-       drm_mode_config_cleanup(dev);
-       iounmap(cirrus->mmio);
-err_unmap_vram:
-       iounmap(cirrus->vram);
-err_dev_put:
-       drm_dev_put(dev);
-err_free_cirrus:
-       kfree(cirrus);
-err_pci_release:
-       pci_release_regions(pdev);
-       return ret;
-}
-
-static void cirrus_pci_remove(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-       struct cirrus_device *cirrus = dev->dev_private;
-
-       drm_dev_unplug(dev);
-       drm_atomic_helper_shutdown(dev);
-       iounmap(cirrus->mmio);
-       iounmap(cirrus->vram);
-       drm_dev_put(dev);
-       pci_release_regions(pdev);
-}
-
-static const struct pci_device_id pciidlist[] = {
-       {
-               .vendor    = PCI_VENDOR_ID_CIRRUS,
-               .device    = PCI_DEVICE_ID_CIRRUS_5446,
-               /* only bind to the cirrus chip in qemu */
-               .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
-               .subdevice = PCI_SUBDEVICE_ID_QEMU,
-       }, {
-               .vendor    = PCI_VENDOR_ID_CIRRUS,
-               .device    = PCI_DEVICE_ID_CIRRUS_5446,
-               .subvendor = PCI_VENDOR_ID_XEN,
-               .subdevice = 0x0001,
-       },
-       { /* end if list */ }
-};
-
-static struct pci_driver cirrus_pci_driver = {
-       .name = DRIVER_NAME,
-       .id_table = pciidlist,
-       .probe = cirrus_pci_probe,
-       .remove = cirrus_pci_remove,
-};
-
-static int __init cirrus_init(void)
-{
-       if (vgacon_text_force())
-               return -EINVAL;
-       return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
-       pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
-MODULE_LICENSE("GPL");
index 9ccfbf213d72609038bd1d79c85d285e5cba719d..965173fd0ac2549f4b419dd5814f3bd97052df9b 100644 (file)
@@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = {
        {"state", drm_state_info, 0},
 };
 
-int drm_atomic_debugfs_init(struct drm_minor *minor)
+void drm_atomic_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_atomic_debugfs_list,
-                       ARRAY_SIZE(drm_atomic_debugfs_list),
-                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_atomic_debugfs_list,
+                                ARRAY_SIZE(drm_atomic_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 531b876d0ed83c7ce9e4419c309b446ada63f9d7..800ac39f3213df4f55304be6bbe5282f9ee3f768 100644 (file)
@@ -135,6 +135,7 @@ static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
                }
        }
 
+       fpriv->was_master = (ret == 0);
        return ret;
 }
 
@@ -174,17 +175,77 @@ out_err:
        return ret;
 }
 
+/*
+ * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
+ * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
+ * from becoming master and/or failing to release it.
+ *
+ * At the same time, the first client (for a given VT) is _always_ master.
+ * Thus in order for the ioctls to succeed, one had to _explicitly_ run the
+ * application as root or flip the setuid bit.
+ *
+ * If the CAP_SYS_ADMIN was missing, no other client could become master...
+ * EVER :-( Leading to a) the graphics session dying badly or b) a completely
+ * locked session.
+ *
+ *
+ * As some point systemd-logind was introduced to orchestrate and delegate
+ * master as applicable. It does so by opening the fd and passing it to users
+ * while in itself logind a) does the set/drop master per users' request and
+ * b)  * implicitly drops master on VT switch.
+ *
+ * Even though logind looks like the future, there are a few issues:
+ *  - some platforms don't have equivalent (Android, CrOS, some BSDs) so
+ * root is required _solely_ for SET/DROP MASTER.
+ *  - applications may not be updated to use it,
+ *  - any client which fails to drop master* can DoS the application using
+ * logind, to a varying degree.
+ *
+ * * Either due missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
+ *
+ *
+ * Here we implement the next best thing:
+ *  - ensure the logind style of fd passing works unchanged, and
+ *  - allow a client to drop/set master, iff it is/was master at a given point
+ * in time.
+ *
+ * Note: DROP_MASTER cannot be free for all, as an arbitrator user could:
+ *  - DoS/crash the arbitrator - details would be implementation specific
+ *  - open the node, become master implicitly and cause issues
+ *
+ * As a result this fixes the following when using root-less build w/o logind
+ * - startx
+ * - weston
+ * - various compositors based on wlroots
+ */
+static int
+drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+{
+       if (file_priv->pid == task_pid(current) && file_priv->was_master)
+               return 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       return 0;
+}
+
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
        int ret = 0;
 
        mutex_lock(&dev->master_mutex);
+
+       ret = drm_master_check_perm(dev, file_priv);
+       if (ret)
+               goto out_unlock;
+
        if (drm_is_current_master(file_priv))
                goto out_unlock;
 
        if (dev->master) {
-               ret = -EINVAL;
+               ret = -EBUSY;
                goto out_unlock;
        }
 
@@ -224,6 +285,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
        int ret = -EINVAL;
 
        mutex_lock(&dev->master_mutex);
+
+       ret = drm_master_check_perm(dev, file_priv);
+       if (ret)
+               goto out_unlock;
+
+       ret = -EINVAL;
        if (!drm_is_current_master(file_priv))
                goto out_unlock;
 
index 121481f6aa7149c668b77c3bb0326dcda2fe9cef..f1dcad96f34170f912f397b7e212165c615ee279 100644 (file)
  *     are underneath planes with higher Z position values. Two planes with the
  *     same Z position value have undefined ordering. Note that the Z position
  *     value can also be immutable, to inform userspace about the hard-coded
- *     stacking of planes, see drm_plane_create_zpos_immutable_property().
+ *     stacking of planes, see drm_plane_create_zpos_immutable_property(). If
+ *     any plane has a zpos property (either mutable or immutable), then all
+ *     planes shall have a zpos property.
  *
  * pixel blend mode:
  *     Pixel blend mode is set up with drm_plane_create_blend_mode_property().
  *              plane does not expose the "alpha" property, then this is
  *              assumed to be 1.0
  *
+ * IN_FORMATS:
+ *     Blob property which contains the set of buffer format and modifier
+ *     pairs supported by this plane. The blob is a drm_format_modifier_blob
+ *     struct. Without this property the plane doesn't support buffers with
+ *     modifiers. Userspace cannot change this property.
+ *
  * Note that all the property extensions described here apply either to the
  * plane or the CRTC (e.g. for the background color, which currently is not
  * exposed and assumed to be black).
@@ -338,10 +346,10 @@ EXPORT_SYMBOL(drm_rotation_simplify);
  * should be set to 0 and max to maximal number of planes for given crtc - 1.
  *
  * If zpos of some planes cannot be changed (like fixed background or
- * cursor/topmost planes), driver should adjust min/max values and assign those
- * planes immutable zpos property with lower or higher values (for more
+ * cursor/topmost planes), drivers shall adjust the min/max values and assign
+ * those planes immutable zpos properties with lower or higher values (for more
  * information, see drm_plane_create_zpos_immutable_property() function). In such
- * case driver should also assign proper initial zpos values for all planes in
+ * case drivers shall also assign proper initial zpos values for all planes in
  * its plane_reset() callback, so the planes will be always sorted properly.
  *
  * See also drm_atomic_normalize_zpos().
index dcabf56983336d1055d59e81fa12d2cfc9ff2b9a..ef26ac57f0394c118ee1cc6bf953e79ee8cb831d 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/nospec.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_legacy.h"
index 6b0c6ef8b9b396de2d5d26127f3862352a6e87f5..8cb93f5209a4bc524ce73962f4623c13c6f48735 100644 (file)
@@ -457,10 +457,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = {
        { "internal_clients", drm_client_debugfs_internal_clients, 0 },
 };
 
-int drm_client_debugfs_init(struct drm_minor *minor)
+void drm_client_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_client_debugfs_list,
-                                       ARRAY_SIZE(drm_client_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_client_debugfs_list,
+                                ARRAY_SIZE(drm_client_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 644f0ad1067176824dafdaccb3d85a7da83247e0..b1099e1251a265984dc18e2a9aaa020c326344a6 100644 (file)
@@ -1970,6 +1970,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
        else
                drm_reset_display_info(connector);
 
+       drm_update_tile_info(connector, edid);
+
        drm_object_property_set_value(&connector->base,
                                      dev->mode_config.non_desktop_property,
                                      connector->display_info.non_desktop);
@@ -2392,7 +2394,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group);
  * tile group or NULL if not found.
  */
 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
-                                              char topology[8])
+                                              const char topology[8])
 {
        struct drm_tile_group *tg;
        int id;
@@ -2422,7 +2424,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
  * new tile group or NULL.
  */
 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
-                                                 char topology[8])
+                                                 const char topology[8])
 {
        struct drm_tile_group *tg;
        int ret;
index 16f2413403aa2fa01bf1df2f4a89fea6c36c4000..da96b2f64d7e4366235131750c926f77973d23b1 100644 (file)
@@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
 /* drm_mode_config.c */
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
+void drm_mode_config_validate(struct drm_device *dev);
 
 /* drm_modes.c */
 const char *drm_get_mode_status_name(enum drm_mode_status status);
@@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 /* drm_atomic.c */
 #ifdef CONFIG_DEBUG_FS
 struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
+void drm_atomic_debugfs_init(struct drm_minor *minor);
 #endif
 
 int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
@@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
 void drm_reset_display_info(struct drm_connector *connector);
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
+void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
index 4e673d318503c6f3685e4467e720b4f991c43e00..2bea221307037a1329c13eea997b7527043720e4 100644 (file)
@@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = {
  * &struct drm_info_list in the given root directory. These files will be removed
  * automatically on drm_debugfs_cleanup().
  */
-int drm_debugfs_create_files(const struct drm_info_list *files, int count,
-                            struct dentry *root, struct drm_minor *minor)
+void drm_debugfs_create_files(const struct drm_info_list *files, int count,
+                             struct dentry *root, struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
        struct drm_info_node *tmp;
@@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
                list_add(&tmp->list, &minor->debugfs_list);
                mutex_unlock(&minor->debugfs_lock);
        }
-       return 0;
 }
 EXPORT_SYMBOL(drm_debugfs_create_files);
 
@@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 {
        struct drm_device *dev = minor->dev;
        char name[64];
-       int ret;
 
        INIT_LIST_HEAD(&minor->debugfs_list);
        mutex_init(&minor->debugfs_lock);
        sprintf(name, "%d", minor_id);
        minor->debugfs_root = debugfs_create_dir(name, root);
 
-       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
-                                      minor->debugfs_root, minor);
-       if (ret) {
-               debugfs_remove(minor->debugfs_root);
-               minor->debugfs_root = NULL;
-               DRM_ERROR("Failed to create core drm debugfs files\n");
-               return ret;
-       }
+       drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
 
        if (drm_drv_uses_atomic_modeset(dev)) {
-               ret = drm_atomic_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create atomic debugfs files\n");
-                       return ret;
-               }
+               drm_atomic_debugfs_init(minor);
        }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_framebuffer_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create framebuffer debugfs file\n");
-                       return ret;
-               }
+               drm_framebuffer_debugfs_init(minor);
 
-               ret = drm_client_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create client debugfs file\n");
-                       return ret;
-               }
+               drm_client_debugfs_init(minor);
        }
 
-       if (dev->driver->debugfs_init) {
-               ret = dev->driver->debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("DRM: Driver failed to initialize "
-                                 "/sys/kernel/debug/dri.\n");
-                       return ret;
-               }
-       }
+       if (dev->driver->debugfs_init)
+               dev->driver->debugfs_init(minor);
+
        return 0;
 }
 
index a7add55a85b43030cc4e5fea56e626df17c9cc85..d07ba54ec94548227ac4679dc509d54ed8c743d4 100644 (file)
@@ -34,9 +34,9 @@
  */
 
 #include <linux/export.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_legacy.h"
index 70c4b7afed12494951da8fdaa0132733e87ad8bf..b90cca361afedeb506a08947a117220af28c6ead 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/iopoll.h>
 
 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
 #include <linux/stacktrace.h>
@@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *
        raw->cur_len = idx;
 }
 
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
-                                     u8 *replybuf, u8 replybuflen, bool hdr)
+static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+                                         struct drm_dp_sideband_msg_hdr *hdr,
+                                         u8 hdrlen)
 {
-       int ret;
-       u8 crc4;
+       /*
+        * ignore out-of-order messages or messages that are part of a
+        * failed transaction
+        */
+       if (!hdr->somt && !msg->have_somt)
+               return false;
 
-       if (hdr) {
-               u8 hdrlen;
-               struct drm_dp_sideband_msg_hdr recv_hdr;
-               ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
-               if (ret == false) {
-                       print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
-                       return false;
-               }
+       /* get length contained in this portion */
+       msg->curchunk_idx = 0;
+       msg->curchunk_len = hdr->msg_len;
+       msg->curchunk_hdrlen = hdrlen;
 
-               /*
-                * ignore out-of-order messages or messages that are part of a
-                * failed transaction
-                */
-               if (!recv_hdr.somt && !msg->have_somt)
-                       return false;
+       /* we have already gotten an somt - don't bother parsing */
+       if (hdr->somt && msg->have_somt)
+               return false;
 
-               /* get length contained in this portion */
-               msg->curchunk_len = recv_hdr.msg_len;
-               msg->curchunk_hdrlen = hdrlen;
+       if (hdr->somt) {
+               memcpy(&msg->initial_hdr, hdr,
+                      sizeof(struct drm_dp_sideband_msg_hdr));
+               msg->have_somt = true;
+       }
+       if (hdr->eomt)
+               msg->have_eomt = true;
 
-               /* we have already gotten an somt - don't bother parsing */
-               if (recv_hdr.somt && msg->have_somt)
-                       return false;
+       return true;
+}
 
-               if (recv_hdr.somt) {
-                       memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
-                       msg->have_somt = true;
-               }
-               if (recv_hdr.eomt)
-                       msg->have_eomt = true;
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+                                          u8 *replybuf, u8 replybuflen)
+{
+       u8 crc4;
 
-               /* copy the bytes for the remainder of this header chunk */
-               msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
-               memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
-       } else {
-               memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
-               msg->curchunk_idx += replybuflen;
-       }
+       memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+       msg->curchunk_idx += replybuflen;
 
        if (msg->curchunk_idx >= msg->curchunk_len) {
                /* do CRC */
@@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
        drm_dp_encode_sideband_req(&req, msg);
 }
 
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
 {
        struct drm_dp_sideband_msg_req_body req;
 
        req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
        drm_dp_encode_sideband_req(&req, msg);
-       return 0;
 }
 
 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -1203,16 +1197,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 
                /* remove from q */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
-                   txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+                   txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
                        list_del(&txmsg->next);
-               }
-
-               if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
-                   txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
-                       mstb->tx_slots[txmsg->seqno] = NULL;
-               }
-               mgr->is_waiting_for_dwn_reply = false;
-
        }
 out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -2691,22 +2677,6 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
        struct drm_dp_mst_branch *mstb = txmsg->dst;
        u8 req_type;
 
-       /* both msg slots are full */
-       if (txmsg->seqno == -1) {
-               if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
-                       DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
-                       return -EAGAIN;
-               }
-               if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
-                       txmsg->seqno = mstb->last_seqno;
-                       mstb->last_seqno ^= 1;
-               } else if (mstb->tx_slots[0] == NULL)
-                       txmsg->seqno = 0;
-               else
-                       txmsg->seqno = 1;
-               mstb->tx_slots[txmsg->seqno] = txmsg;
-       }
-
        req_type = txmsg->msg[0] & 0x7f;
        if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
                req_type == DP_RESOURCE_STATUS_NOTIFY)
@@ -2718,7 +2688,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
        hdr->lcr = mstb->lct - 1;
        if (mstb->lct > 1)
                memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
-       hdr->seqno = txmsg->seqno;
+
        return 0;
 }
 /*
@@ -2733,15 +2703,15 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
        int len, space, idx, tosend;
        int ret;
 
+       if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+               return 0;
+
        memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
 
-       if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
-               txmsg->seqno = -1;
+       if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
                txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
-       }
 
-       /* make hdr from dst mst - for replies use seqno
-          otherwise assign one */
+       /* make hdr from dst mst */
        ret = set_hdr_from_dst_qlock(&hdr, txmsg);
        if (ret < 0)
                return ret;
@@ -2794,42 +2764,17 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        if (list_empty(&mgr->tx_msg_downq))
                return;
 
-       txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+       txmsg = list_first_entry(&mgr->tx_msg_downq,
+                                struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, false);
-       if (ret == 1) {
-               /* txmsg is sent it should be in the slots now */
-               mgr->is_waiting_for_dwn_reply = true;
-               list_del(&txmsg->next);
-       } else if (ret) {
+       if (ret < 0) {
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-               mgr->is_waiting_for_dwn_reply = false;
                list_del(&txmsg->next);
-               if (txmsg->seqno != -1)
-                       txmsg->dst->tx_slots[txmsg->seqno] = NULL;
                txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
                wake_up_all(&mgr->tx_waitq);
        }
 }
 
-/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
-                                      struct drm_dp_sideband_msg_tx *txmsg)
-{
-       int ret;
-
-       /* construct a chunk from the first msg in the tx_msg queue */
-       ret = process_single_tx_qlock(mgr, txmsg, true);
-
-       if (ret != 1)
-               DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-
-       if (txmsg->seqno != -1) {
-               WARN_ON((unsigned int)txmsg->seqno >
-                       ARRAY_SIZE(txmsg->dst->tx_slots));
-               txmsg->dst->tx_slots[txmsg->seqno] = NULL;
-       }
-}
-
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_sideband_msg_tx *txmsg)
 {
@@ -2842,8 +2787,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
 
-       if (list_is_singular(&mgr->tx_msg_downq) &&
-           !mgr->is_waiting_for_dwn_reply)
+       if (list_is_singular(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -3463,7 +3407,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
 
 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_branch *mstb,
-                                   int req_type, int seqno, bool broadcast)
+                                   int req_type, bool broadcast)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
 
@@ -3472,13 +3416,11 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
                return -ENOMEM;
 
        txmsg->dst = mstb;
-       txmsg->seqno = seqno;
        drm_dp_encode_up_ack_reply(txmsg, req_type);
 
        mutex_lock(&mgr->qlock);
-
-       process_single_up_tx_qlock(mgr, txmsg);
-
+       /* construct a chunk from the first msg in the tx_msg queue */
+       process_single_tx_qlock(mgr, txmsg, true);
        mutex_unlock(&mgr->qlock);
 
        kfree(txmsg);
@@ -3703,31 +3645,63 @@ out_fail:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+                     struct drm_dp_mst_branch **mstb)
 {
        int len;
        u8 replyblock[32];
        int replylen, curreply;
        int ret;
-       struct drm_dp_sideband_msg_rx *msg;
-       int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
-       msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+       u8 hdrlen;
+       struct drm_dp_sideband_msg_hdr hdr;
+       struct drm_dp_sideband_msg_rx *msg =
+               up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+       int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+                          DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+       if (!up)
+               *mstb = NULL;
 
        len = min(mgr->max_dpcd_transaction_bytes, 16);
-       ret = drm_dp_dpcd_read(mgr->aux, basereg,
-                              replyblock, len);
+       ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
        if (ret != len) {
                DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
                return false;
        }
-       ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+       ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+       if (ret == false) {
+               print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+                              1, replyblock, len, false);
+               DRM_DEBUG_KMS("ERROR: failed header\n");
+               return false;
+       }
+
+       if (!up) {
+               /* Caller is responsible for giving back this reference */
+               *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+               if (!*mstb) {
+                       DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+                                     hdr.lct);
+                       return false;
+               }
+       }
+
+       if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+               DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+                             replyblock[0]);
+               return false;
+       }
+
+       replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+       ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
        if (!ret) {
                DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
                return false;
        }
-       replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
-       replylen -= len;
+       replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
        curreply = len;
        while (replylen > 0) {
                len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -3739,7 +3713,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
                        return false;
                }
 
-               ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+               ret = drm_dp_sideband_append_payload(msg, replyblock, len);
                if (!ret) {
                        DRM_DEBUG_KMS("failed to build sideband msg\n");
                        return false;
@@ -3754,67 +3728,60 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
-       struct drm_dp_mst_branch *mstb;
-       struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
-       int slot = -1;
-
-       if (!drm_dp_get_one_sb_msg(mgr, false))
-               goto clear_down_rep_recv;
+       struct drm_dp_mst_branch *mstb = NULL;
+       struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
 
-       if (!mgr->down_rep_recv.have_eomt)
-               return 0;
+       if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+               goto out;
 
-       mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
-       if (!mstb) {
-               DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
-                             hdr->lct);
-               goto clear_down_rep_recv;
-       }
+       /* Multi-packet message transmission, don't clear the reply */
+       if (!msg->have_eomt)
+               goto out;
 
        /* find the message */
-       slot = hdr->seqno;
        mutex_lock(&mgr->qlock);
-       txmsg = mstb->tx_slots[slot];
-       /* remove from slots */
+       txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+                                        struct drm_dp_sideband_msg_tx, next);
        mutex_unlock(&mgr->qlock);
 
-       if (!txmsg) {
+       /* Were we actually expecting a response, and from this mstb? */
+       if (!txmsg || txmsg->dst != mstb) {
+               struct drm_dp_sideband_msg_hdr *hdr;
+               hdr = &msg->initial_hdr;
                DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
                              mstb, hdr->seqno, hdr->lct, hdr->rad[0],
-                             mgr->down_rep_recv.msg[0]);
-               goto no_msg;
+                             msg->msg[0]);
+               goto out_clear_reply;
        }
 
-       drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+       drm_dp_sideband_parse_reply(msg, &txmsg->reply);
 
-       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
                DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
                              txmsg->reply.req_type,
                              drm_dp_mst_req_type_str(txmsg->reply.req_type),
                              txmsg->reply.u.nak.reason,
                              drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
                              txmsg->reply.u.nak.nak_data);
+       }
 
-       memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+       memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
        drm_dp_mst_topology_put_mstb(mstb);
 
        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
-       mstb->tx_slots[slot] = NULL;
-       mgr->is_waiting_for_dwn_reply = false;
+       list_del(&txmsg->next);
        mutex_unlock(&mgr->qlock);
 
        wake_up_all(&mgr->tx_waitq);
 
        return 0;
 
-no_msg:
-       drm_dp_mst_topology_put_mstb(mstb);
-clear_down_rep_recv:
-       mutex_lock(&mgr->qlock);
-       mgr->is_waiting_for_dwn_reply = false;
-       mutex_unlock(&mgr->qlock);
-       memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out_clear_reply:
+       memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+       if (mstb)
+               drm_dp_mst_topology_put_mstb(mstb);
 
        return 0;
 }
@@ -3890,11 +3857,9 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
 
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
-       struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
        struct drm_dp_pending_up_req *up_req;
-       bool seqno;
 
-       if (!drm_dp_get_one_sb_msg(mgr, true))
+       if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
                goto out;
 
        if (!mgr->up_req_recv.have_eomt)
@@ -3907,7 +3872,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
        }
        INIT_LIST_HEAD(&up_req->next);
 
-       seqno = hdr->seqno;
        drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
 
        if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
@@ -3919,7 +3883,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
        }
 
        drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
-                                seqno, false);
+                                false);
 
        if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
                const struct drm_dp_connection_status_notify *conn_stat =
@@ -3941,7 +3905,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                              res_stat->available_pbn);
        }
 
-       up_req->hdr = *hdr;
+       up_req->hdr = mgr->up_req_recv.initial_hdr;
        mutex_lock(&mgr->up_req_lock);
        list_add_tail(&up_req->next, &mgr->up_req_list);
        mutex_unlock(&mgr->up_req_lock);
@@ -4046,27 +4010,6 @@ out:
 }
 EXPORT_SYMBOL(drm_dp_mst_detect_port);
 
-/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_dp_mst_port *port)
-{
-       bool ret = false;
-
-       port = drm_dp_mst_topology_get_port_validated(mgr, port);
-       if (!port)
-               return ret;
-       ret = port->has_audio;
-       drm_dp_mst_topology_put_port(port);
-       return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
 /**
  * drm_dp_mst_get_edid() - get EDID for an MST port
  * @connector: toplevel connector to get EDID for
@@ -4443,42 +4386,58 @@ fail:
        return ret;
 }
 
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+       int ret;
+       u8 status;
+
+       ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+       if (ret < 0)
+               return ret;
+
+       return status;
+}
 
 /**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
  * @mgr: manager to use
  *
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating it's payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
  */
 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
 {
-       u8 status;
-       int ret;
-       int count = 0;
-
-       do {
-               ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
-               if (ret < 0) {
-                       DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
-                       goto fail;
-               }
-
-               if (status & DP_PAYLOAD_ACT_HANDLED)
-                       break;
-               count++;
-               udelay(100);
-
-       } while (count < 30);
-
-       if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
-               DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
-               ret = -EINVAL;
-               goto fail;
+       /*
+        * There doesn't seem to be any recommended retry count or timeout in
+        * the MST specification. Since some hubs have been observed to take
+        * over 1 second to update their payload allocations under certain
+        * conditions, we use a rather large timeout value.
+        */
+       const int timeout_ms = 3000;
+       int ret, status;
+
+       ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+                                status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+                                200, timeout_ms * USEC_PER_MSEC);
+       if (ret < 0 && status >= 0) {
+               DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+                         timeout_ms, status);
+               return -EINVAL;
+       } else if (status < 0) {
+               /*
+                * Failure here isn't unexpected - the hub may have
+                * just been unplugged
+                */
+               DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+                             status);
+               return status;
        }
+
        return 0;
-fail:
-       return ret;
 }
 EXPORT_SYMBOL(drm_dp_check_act_status);
 
@@ -4669,28 +4628,18 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+       if (!list_empty(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
 
-static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
 {
-       if (!port->connector)
-               return;
-
-       if (port->mgr->cbs->destroy_connector) {
-               port->mgr->cbs->destroy_connector(port->mgr, port->connector);
-       } else {
+       if (port->connector) {
                drm_connector_unregister(port->connector);
                drm_connector_put(port->connector);
        }
-}
-
-static inline void
-drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
-{
-       drm_dp_destroy_connector(port);
 
        drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
@@ -4700,26 +4649,25 @@ static inline void
 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
 {
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
-       struct drm_dp_mst_port *port, *tmp;
+       struct drm_dp_mst_port *port, *port_tmp;
+       struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
        bool wake_tx = false;
 
        mutex_lock(&mgr->lock);
-       list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+       list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
                list_del(&port->next);
                drm_dp_mst_topology_put_port(port);
        }
        mutex_unlock(&mgr->lock);
 
-       /* drop any tx slots msg */
+       /* drop any tx slot msg */
        mutex_lock(&mstb->mgr->qlock);
-       if (mstb->tx_slots[0]) {
-               mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-               mstb->tx_slots[0] = NULL;
-               wake_tx = true;
-       }
-       if (mstb->tx_slots[1]) {
-               mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-               mstb->tx_slots[1] = NULL;
+       list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
+               if (txmsg->dst != mstb)
+                       continue;
+
+               txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+               list_del(&txmsg->next);
                wake_tx = true;
        }
        mutex_unlock(&mstb->mgr->qlock);
index 7b1a628d1f6e3277657a5582c1f196fa1a7fe851..bc38322f306ea24b7b32032f5d1bedcc80354a16 100644 (file)
@@ -39,6 +39,7 @@
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mode_object.h>
 #include <drm/drm_print.h>
 
@@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
        }
 }
 
+static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+{
+       struct drm_minor *minor = data;
+       unsigned long flags;
+
+       WARN_ON(dev != minor->dev);
+
+       put_device(minor->kdev);
+
+       spin_lock_irqsave(&drm_minor_lock, flags);
+       idr_remove(&drm_minors_idr, minor->index);
+       spin_unlock_irqrestore(&drm_minor_lock, flags);
+}
+
 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
 {
        struct drm_minor *minor;
        unsigned long flags;
        int r;
 
-       minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+       minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;
 
@@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
        idr_preload_end();
 
        if (r < 0)
-               goto err_free;
+               return r;
 
        minor->index = r;
 
+       r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+       if (r)
+               return r;
+
        minor->kdev = drm_sysfs_minor_alloc(minor);
-       if (IS_ERR(minor->kdev)) {
-               r = PTR_ERR(minor->kdev);
-               goto err_index;
-       }
+       if (IS_ERR(minor->kdev))
+               return PTR_ERR(minor->kdev);
 
        *drm_minor_get_slot(dev, type) = minor;
        return 0;
-
-err_index:
-       spin_lock_irqsave(&drm_minor_lock, flags);
-       idr_remove(&drm_minors_idr, minor->index);
-       spin_unlock_irqrestore(&drm_minor_lock, flags);
-err_free:
-       kfree(minor);
-       return r;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
-       struct drm_minor **slot, *minor;
-       unsigned long flags;
-
-       slot = drm_minor_get_slot(dev, type);
-       minor = *slot;
-       if (!minor)
-               return;
-
-       put_device(minor->kdev);
-
-       spin_lock_irqsave(&drm_minor_lock, flags);
-       idr_remove(&drm_minors_idr, minor->index);
-       spin_unlock_irqrestore(&drm_minor_lock, flags);
-
-       kfree(minor);
-       *slot = NULL;
 }
 
 static int drm_minor_register(struct drm_device *dev, unsigned int type)
@@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor)
  * any other resources allocated at device initialization and drop the driver's
  * reference to &drm_device using drm_dev_put().
  *
- * Note that the lifetime rules for &drm_device instance has still a lot of
- * historical baggage. Hence use the reference counting provided by
- * drm_dev_get() and drm_dev_put() only carefully.
+ * Note that any allocation or resource which is visible to userspace must be
+ * released only when the final drm_dev_put() is called, and not when the
+ * driver is unbound from the underlying physical struct &device. Best to use
+ * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
+ * related functions.
+ *
+ * devres managed resources like devm_kmalloc() can only be used for resources
+ * directly related to the underlying hardware device, and only used in code
+ * paths fully protected by drm_dev_enter() and drm_dev_exit().
  *
  * Display driver example
  * ~~~~~~~~~~~~~~~~~~~~~~
  *
  * The following example shows a typical structure of a DRM display driver.
  * The example focus on the probe() function and the other functions that is
- * almost always present and serves as a demonstration of devm_drm_dev_init()
- * usage with its accompanying drm_driver->release callback.
+ * almost always present and serves as a demonstration of devm_drm_dev_init().
  *
  * .. code-block:: c
  *
@@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor)
  *             struct clk *pclk;
  *     };
  *
- *     static void driver_drm_release(struct drm_device *drm)
- *     {
- *             struct driver_device *priv = container_of(...);
- *
- *             drm_mode_config_cleanup(drm);
- *             drm_dev_fini(drm);
- *             kfree(priv->userspace_facing);
- *             kfree(priv);
- *     }
- *
  *     static struct drm_driver driver_drm_driver = {
  *             [...]
- *             .release = driver_drm_release,
  *     };
  *
  *     static int driver_probe(struct platform_device *pdev)
@@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor)
  *
  *             ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
  *             if (ret) {
- *                     kfree(drm);
+ *                     kfree(priv);
  *                     return ret;
  *             }
+ *             drmm_add_final_kfree(drm, priv);
  *
- *             drm_mode_config_init(drm);
+ *             ret = drmm_mode_config_init(drm);
+ *             if (ret)
+ *                     return ret;
  *
- *             priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ *             priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
  *             if (!priv->userspace_facing)
  *                     return -ENOMEM;
  *
@@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode)
  *    used.
  */
 
+static void drm_dev_init_release(struct drm_device *dev, void *res)
+{
+       drm_legacy_ctxbitmap_cleanup(dev);
+       drm_legacy_remove_map_hash(dev);
+       drm_fs_inode_free(dev->anon_inode);
+
+       put_device(dev->dev);
+       /* Prevent use-after-free in drm_managed_release when debugging is
+        * enabled. Slightly awkward, but can't really be helped. */
+       dev->dev = NULL;
+       mutex_destroy(&dev->master_mutex);
+       mutex_destroy(&dev->clientlist_mutex);
+       mutex_destroy(&dev->filelist_mutex);
+       mutex_destroy(&dev->struct_mutex);
+       drm_legacy_destroy_members(dev);
+}
+
 /**
  * drm_dev_init - Initialise new DRM device
  * @dev: DRM device
@@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode)
  * arbitrary offset, you must supply a &drm_driver.release callback and control
  * the finalization explicitly.
  *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
+ *
  * RETURNS:
  * 0 on success, or error code on failure.
  */
@@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev,
        dev->dev = get_device(parent);
        dev->driver = driver;
 
+       INIT_LIST_HEAD(&dev->managed.resources);
+       spin_lock_init(&dev->managed.lock);
+
        /* no per-device feature limits by default */
        dev->driver_features = ~0u;
 
@@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev,
        mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->master_mutex);
 
+       ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+       if (ret)
+               return ret;
+
        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
-               goto err_free;
+               goto err;
        }
 
        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
-                       goto err_minors;
+                       goto err;
        }
 
        ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
        if (ret)
-               goto err_minors;
+               goto err;
 
        ret = drm_legacy_create_map_hash(dev);
        if (ret)
-               goto err_minors;
+               goto err;
 
        drm_legacy_ctxbitmap_init(dev);
 
@@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev,
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
-                       goto err_ctxbitmap;
+                       goto err;
                }
        }
 
        ret = drm_dev_set_unique(dev, dev_name(parent));
        if (ret)
-               goto err_setunique;
+               goto err;
 
        return 0;
 
-err_setunique:
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_destroy(dev);
-err_ctxbitmap:
-       drm_legacy_ctxbitmap_cleanup(dev);
-       drm_legacy_remove_map_hash(dev);
-err_minors:
-       drm_minor_free(dev, DRM_MINOR_PRIMARY);
-       drm_minor_free(dev, DRM_MINOR_RENDER);
-       drm_fs_inode_free(dev->anon_inode);
-err_free:
-       put_device(dev->dev);
-       mutex_destroy(&dev->master_mutex);
-       mutex_destroy(&dev->clientlist_mutex);
-       mutex_destroy(&dev->filelist_mutex);
-       mutex_destroy(&dev->struct_mutex);
-       drm_legacy_destroy_members(dev);
+err:
+       drm_managed_release(dev);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_dev_init);
@@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data)
  * @driver: DRM driver
  *
  * Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put(). You must supply a
- * &drm_driver.release callback to control the finalization explicitly.
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
  *
  * RETURNS:
  * 0 on success, or error code on failure.
@@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent,
 {
        int ret;
 
-       if (WARN_ON(!driver->release))
-               return -EINVAL;
-
        ret = drm_dev_init(dev, driver, parent);
        if (ret)
                return ret;
@@ -741,42 +739,28 @@ int devm_drm_dev_init(struct device *parent,
 }
 EXPORT_SYMBOL(devm_drm_dev_init);
 
-/**
- * drm_dev_fini - Finalize a dead DRM device
- * @dev: DRM device
- *
- * Finalize a dead DRM device. This is the converse to drm_dev_init() and
- * frees up all data allocated by it. All driver private data should be
- * finalized first. Note that this function does not free the @dev, that is
- * left to the caller.
- *
- * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
- * from a &drm_driver.release callback.
- */
-void drm_dev_fini(struct drm_device *dev)
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+                          size_t size, size_t offset)
 {
-       drm_vblank_cleanup(dev);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_destroy(dev);
-
-       drm_legacy_ctxbitmap_cleanup(dev);
-       drm_legacy_remove_map_hash(dev);
-       drm_fs_inode_free(dev->anon_inode);
+       void *container;
+       struct drm_device *drm;
+       int ret;
 
-       drm_minor_free(dev, DRM_MINOR_PRIMARY);
-       drm_minor_free(dev, DRM_MINOR_RENDER);
+       container = kzalloc(size, GFP_KERNEL);
+       if (!container)
+               return ERR_PTR(-ENOMEM);
 
-       put_device(dev->dev);
+       drm = container + offset;
+       ret = devm_drm_dev_init(parent, drm, driver);
+       if (ret) {
+               kfree(container);
+               return ERR_PTR(ret);
+       }
+       drmm_add_final_kfree(drm, container);
 
-       mutex_destroy(&dev->master_mutex);
-       mutex_destroy(&dev->clientlist_mutex);
-       mutex_destroy(&dev->filelist_mutex);
-       mutex_destroy(&dev->struct_mutex);
-       drm_legacy_destroy_members(dev);
-       kfree(dev->unique);
+       return container;
 }
-EXPORT_SYMBOL(drm_dev_fini);
+EXPORT_SYMBOL(__devm_drm_dev_alloc);
 
 /**
  * drm_dev_alloc - Allocate new DRM device
@@ -816,6 +800,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                return ERR_PTR(ret);
        }
 
+       drmm_add_final_kfree(dev, dev);
+
        return dev;
 }
 EXPORT_SYMBOL(drm_dev_alloc);
@@ -824,12 +810,13 @@ static void drm_dev_release(struct kref *ref)
 {
        struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
-       if (dev->driver->release) {
+       if (dev->driver->release)
                dev->driver->release(dev);
-       } else {
-               drm_dev_fini(dev);
-               kfree(dev);
-       }
+
+       drm_managed_release(dev);
+
+       if (dev->managed.final_kfree)
+               kfree(dev->managed.final_kfree);
 }
 
 /**
@@ -946,6 +933,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        struct drm_driver *driver = dev->driver;
        int ret;
 
+       if (!driver->load)
+               drm_mode_config_validate(dev);
+
+       WARN_ON(!dev->managed.final_kfree);
+
        if (drm_dev_needs_global_mutex(dev))
                mutex_lock(&drm_global_mutex);
 
@@ -1046,8 +1038,8 @@ EXPORT_SYMBOL(drm_dev_unregister);
  */
 int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
-       kfree(dev->unique);
-       dev->unique = kstrdup(name, GFP_KERNEL);
+       drmm_kfree(dev, dev->unique);
+       dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
 
        return dev->unique ? 0 : -ENOMEM;
 }
index 116451101426d40e7acf0c8b002ed4bba823378e..3bd95c4b02eba92a5d8cac88f95647509fb54146 100644 (file)
@@ -1583,8 +1583,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
 MODULE_PARM_DESC(edid_fixup,
                 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
-static void drm_get_displayid(struct drm_connector *connector,
-                             struct edid *edid);
 static int validate_displayid(u8 *displayid, int length, int idx);
 
 static int drm_edid_block_checksum(const u8 *raw_edid)
@@ -2018,18 +2016,13 @@ EXPORT_SYMBOL(drm_probe_ddc);
 struct edid *drm_get_edid(struct drm_connector *connector,
                          struct i2c_adapter *adapter)
 {
-       struct edid *edid;
-
        if (connector->force == DRM_FORCE_OFF)
                return NULL;
 
        if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
                return NULL;
 
-       edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
-       if (edid)
-               drm_get_displayid(connector, edid);
-       return edid;
+       return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -2387,6 +2380,14 @@ bad_std_timing(u8 a, u8 b)
               (a == 0x20 && b == 0x20);
 }
 
+static int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+       if (mode->htotal <= 0)
+               return 0;
+
+       return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
+}
+
 /**
  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
  * @connector: connector of for the EDID block
@@ -3212,16 +3213,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
 }
 
 
-static u8 *drm_find_displayid_extension(const struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid,
+                                       int *length, int *idx)
 {
-       return drm_find_edid_extension(edid, DISPLAYID_EXT);
+       u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT);
+       struct displayid_hdr *base;
+       int ret;
+
+       if (!displayid)
+               return NULL;
+
+       /* EDID extensions block checksum isn't for us */
+       *length = EDID_LENGTH - 1;
+       *idx = 1;
+
+       ret = validate_displayid(displayid, *length, *idx);
+       if (ret)
+               return NULL;
+
+       base = (struct displayid_hdr *)&displayid[*idx];
+       *length = *idx + sizeof(*base) + base->bytes;
+
+       return displayid;
 }
 
 static u8 *drm_find_cea_extension(const struct edid *edid)
 {
-       int ret;
-       int idx = 1;
-       int length = EDID_LENGTH;
+       int length, idx;
        struct displayid_block *block;
        u8 *cea;
        u8 *displayid;
@@ -3232,14 +3250,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
                return cea;
 
        /* CEA blocks can also be found embedded in a DisplayID block */
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid)
                return NULL;
 
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return NULL;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                if (block->tag == DATA_BLOCK_CTA) {
@@ -5084,7 +5098,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
-       int i;
+       int i, dispid_length;
        u8 csum = 0;
        struct displayid_hdr *base;
 
@@ -5093,15 +5107,18 @@ static int validate_displayid(u8 *displayid, int length, int idx)
        DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
                      base->rev, base->bytes, base->prod_id, base->ext_count);
 
-       if (base->bytes + 5 > length - idx)
+       /* +1 for DispID checksum */
+       dispid_length = sizeof(*base) + base->bytes + 1;
+       if (dispid_length > length - idx)
                return -EINVAL;
-       for (i = idx; i <= base->bytes + 5; i++) {
-               csum += displayid[i];
-       }
+
+       for (i = 0; i < dispid_length; i++)
+               csum += displayid[idx + i];
        if (csum) {
                DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -5180,20 +5197,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
                                        struct edid *edid)
 {
        u8 *displayid;
-       int ret;
-       int idx = 1;
-       int length = EDID_LENGTH;
+       int length, idx;
        struct displayid_block *block;
        int num_modes = 0;
 
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid)
                return 0;
 
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return 0;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                switch (block->tag) {
@@ -5782,9 +5793,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
 
 static int drm_parse_tiled_block(struct drm_connector *connector,
-                                struct displayid_block *block)
+                                const struct displayid_block *block)
 {
-       struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+       const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
        u16 w, h;
        u8 tile_v_loc, tile_h_loc;
        u8 num_v_tile, num_h_tile;
@@ -5835,22 +5846,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector,
        return 0;
 }
 
-static int drm_parse_display_id(struct drm_connector *connector,
-                               u8 *displayid, int length,
-                               bool is_edid_extension)
+static int drm_displayid_parse_tiled(struct drm_connector *connector,
+                                    const u8 *displayid, int length, int idx)
 {
-       /* if this is an EDID extension the first byte will be 0x70 */
-       int idx = 0;
-       struct displayid_block *block;
+       const struct displayid_block *block;
        int ret;
 
-       if (is_edid_extension)
-               idx = 1;
-
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return ret;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
@@ -5862,12 +5863,6 @@ static int drm_parse_display_id(struct drm_connector *connector,
                        if (ret)
                                return ret;
                        break;
-               case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
-                       /* handled in mode gathering code. */
-                       break;
-               case DATA_BLOCK_CTA:
-                       /* handled in the cea parser code. */
-                       break;
                default:
                        DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
                        break;
@@ -5876,19 +5871,21 @@ static int drm_parse_display_id(struct drm_connector *connector,
        return 0;
 }
 
-static void drm_get_displayid(struct drm_connector *connector,
-                             struct edid *edid)
+void drm_update_tile_info(struct drm_connector *connector,
+                         const struct edid *edid)
 {
-       void *displayid = NULL;
+       const void *displayid = NULL;
+       int length, idx;
        int ret;
+
        connector->has_tile = false;
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid) {
                /* drop reference to any tile group we had */
                goto out_drop_ref;
        }
 
-       ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+       ret = drm_displayid_parse_tiled(connector, displayid, length, idx);
        if (ret < 0)
                goto out_drop_ref;
        if (!connector->has_tile)
index 9801c0333eca29e937a1877525976bd572378547..cb2349ad338d953bbdd56896dc7ee08e1ab6d3a3 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * drm kms/fb cma (contiguous memory allocator) helper functions
  *
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
  *   Author: Lars-Peter Clausen <lars@metafoo.de>
  *
  * Based on udl_fbdev.c
index a9771de4d17e64816964c8a7244a46216814cd0e..02fc240268729a07cf00dcd4bf587dbc3ef52654 100644 (file)
@@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
        if (ret)
                goto err_release;
 
+       /*
+        * TODO: We really should be smarter here and alloc an apperture
+        * for each IORESOURCE_MEM resource helper->dev->dev has and also
+        * init the ranges of the appertures based on the resources.
+        * Note some drivers currently count on there being only 1 empty
+        * aperture and fill this themselves, these will need to be dealt
+        * with somehow when fixing this.
+        */
        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
@@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
  *
  * This function sets up generic fbdev emulation for drivers that supports
  * dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver registered
+ * the new DRM device with drm_dev_register().
  *
  * Restore, hotplug events and teardown are all taken care of. Drivers that do
  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
  * Setup will be retried on the next hotplug event.
  *
  * The fbdev is destroyed by drm_dev_unregister().
- *
- * Returns:
- * Zero on success or negative error code on failure.
  */
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev,
+                            unsigned int preferred_bpp)
 {
        struct drm_fb_helper *fb_helper;
        int ret;
 
-       WARN(dev->fb_helper, "fb_helper is already set!\n");
+       drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+       drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
 
        if (!drm_fbdev_emulation)
-               return 0;
+               return;
 
        fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
-       if (!fb_helper)
-               return -ENOMEM;
+       if (!fb_helper) {
+               drm_err(dev, "Failed to allocate fb_helper\n");
+               return;
+       }
 
        ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
        if (ret) {
                kfree(fb_helper);
                drm_err(dev, "Failed to register client: %d\n", ret);
-               return ret;
+               return;
        }
 
        if (!preferred_bpp)
@@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
                drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
 
        drm_client_register(&fb_helper->client);
-
-       return 0;
 }
 EXPORT_SYMBOL(drm_fbdev_generic_setup);
 
index eb009d3ab48fa318ddd51dae56784d288ccef17d..7194e67e78bd2412bad54eab4dc5e1b5eeced463 100644 (file)
@@ -613,7 +613,8 @@ put_back_event:
                                file_priv->event_space -= length;
                                list_add(&e->link, &file_priv->event_list);
                                spin_unlock_irq(&dev->event_lock);
-                               wake_up_interruptible(&file_priv->event_wait);
+                               wake_up_interruptible_poll(&file_priv->event_wait,
+                                       EPOLLIN | EPOLLRDNORM);
                                break;
                        }
 
@@ -809,7 +810,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
        list_del(&e->pending_link);
        list_add_tail(&e->link,
                      &e->file_priv->event_list);
-       wake_up_interruptible(&e->file_priv->event_wait);
+       wake_up_interruptible_poll(&e->file_priv->event_wait,
+               EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL(drm_send_event_locked);
 
index 57ac94ce9b9eefc3615f1399b9662a1e1f88827a..0375b3d7f8d0fd5ef57d85e3c48a539c6865ffe6 100644 (file)
@@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
        { "framebuffer", drm_framebuffer_info, 0 },
 };
 
-int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+void drm_framebuffer_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
-                               ARRAY_SIZE(drm_framebuffer_debugfs_list),
-                               minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+                                ARRAY_SIZE(drm_framebuffer_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 37627d06fb0609e4dc9dcab6b33f981a280087b9..7bf628e130232e69a2ba0674ea2843f4e17ebd6a 100644 (file)
@@ -44,6 +44,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vma_manager.h>
 
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+       drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
 /**
  * drm_gem_init - Initialize the GEM device fields
  * @dev: drm_devic structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);
 
-       vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+       vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+                                         GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
 
-       return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
-       drm_vma_offset_manager_destroy(dev->vma_offset_manager);
-       kfree(dev->vma_offset_manager);
-       dev->vma_offset_manager = NULL;
+       return drmm_add_action(dev, drm_gem_init_release, NULL);
 }
 
 /**
@@ -432,7 +431,7 @@ err_unref:
  * drm_gem_handle_create - create a gem handle for an object
  * @file_priv: drm file-private structure to register the handle for
  * @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
  *
  * Create a handle for this object. This adds a handle reference to the object,
  * which includes a regular reference count. Callers will likely want to
index 3a7ace19a9021c1dfd7d761e0244725e7c77b429..ccc2c71fa49140915928335c5da8e5460d337a9f 100644 (file)
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#define AFBC_HEADER_SIZE               16
+#define AFBC_TH_LAYOUT_ALIGNMENT       8
+#define AFBC_HDR_ALIGN                 64
+#define AFBC_SUPERBLOCK_PIXELS         256
+#define AFBC_SUPERBLOCK_ALIGNMENT      128
+#define AFBC_TH_BODY_START_ALIGNMENT   4096
+
 /**
  * DOC: overview
  *
@@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
 
-static struct drm_framebuffer *
-drm_gem_fb_alloc(struct drm_device *dev,
+static int
+drm_gem_fb_init(struct drm_device *dev,
+                struct drm_framebuffer *fb,
                 const struct drm_mode_fb_cmd2 *mode_cmd,
                 struct drm_gem_object **obj, unsigned int num_planes,
                 const struct drm_framebuffer_funcs *funcs)
 {
-       struct drm_framebuffer *fb;
        int ret, i;
 
-       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
-       if (!fb)
-               return ERR_PTR(-ENOMEM);
-
        drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
        for (i = 0; i < num_planes; i++)
                fb->obj[i] = obj[i];
 
        ret = drm_framebuffer_init(dev, fb, funcs);
-       if (ret) {
+       if (ret)
                drm_err(dev, "Failed to init framebuffer: %d\n", ret);
-               kfree(fb);
-               return ERR_PTR(ret);
-       }
 
-       return fb;
+       return ret;
 }
 
 /**
@@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_fb_create_handle);
 
 /**
- * drm_gem_fb_create_with_funcs() - Helper function for the
- *                                  &drm_mode_config_funcs.fb_create
- *                                  callback
+ * drm_gem_fb_init_with_funcs() - Helper function for implementing
+ *                               &drm_mode_config_funcs.fb_create
+ *                               callback in cases when the driver
+ *                               allocates a subclass of
+ *                               struct drm_framebuffer
  * @dev: DRM device
+ * @fb: framebuffer object
  * @file: DRM file that holds the GEM handle(s) backing the framebuffer
  * @mode_cmd: Metadata from the userspace framebuffer creation request
  * @funcs: vtable to be used for the new framebuffer object
@@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
  * This function can be used to set &drm_framebuffer_funcs for drivers that need
  * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
  * change &drm_framebuffer_funcs. The function does buffer size validation.
+ * The buffer size validation is for a general case, though, so users should
+ * pay attention to the checks being appropriate for them or, at least,
+ * non-conflicting.
  *
  * Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ * Zero or a negative error code.
  */
-struct drm_framebuffer *
-drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
-                            const struct drm_mode_fb_cmd2 *mode_cmd,
-                            const struct drm_framebuffer_funcs *funcs)
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+                              struct drm_framebuffer *fb,
+                              struct drm_file *file,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              const struct drm_framebuffer_funcs *funcs)
 {
        const struct drm_format_info *info;
        struct drm_gem_object *objs[4];
-       struct drm_framebuffer *fb;
        int ret, i;
 
        info = drm_get_format_info(dev, mode_cmd);
        if (!info)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        for (i = 0; i < info->num_planes; i++) {
                unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -175,19 +181,55 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
                }
        }
 
-       fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
-       if (IS_ERR(fb)) {
-               ret = PTR_ERR(fb);
+       ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+       if (ret)
                goto err_gem_object_put;
-       }
 
-       return fb;
+       return 0;
 
 err_gem_object_put:
        for (i--; i >= 0; i--)
                drm_gem_object_put_unlocked(objs[i]);
 
-       return ERR_PTR(ret);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
+
+/**
+ * drm_gem_fb_create_with_funcs() - Helper function for the
+ *                                  &drm_mode_config_funcs.fb_create
+ *                                  callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+                            const struct drm_mode_fb_cmd2 *mode_cmd,
+                            const struct drm_framebuffer_funcs *funcs)
+{
+       struct drm_framebuffer *fb;
+       int ret;
+
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+       if (ret) {
+               kfree(fb);
+               return ERR_PTR(ret);
+       }
+
+       return fb;
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
 
@@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
 
+static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+                                 const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       const struct drm_format_info *info;
+
+       info = drm_get_format_info(dev, mode_cmd);
+
+       /* use whatever a driver has set */
+       if (info->cpp[0])
+               return info->cpp[0] * 8;
+
+       /* guess otherwise */
+       switch (info->format) {
+       case DRM_FORMAT_YUV420_8BIT:
+               return 12;
+       case DRM_FORMAT_YUV420_10BIT:
+               return 15;
+       case DRM_FORMAT_VUY101010:
+               return 30;
+       default:
+               break;
+       }
+
+       /* all attempts failed */
+       return 0;
+}
+
+static int drm_gem_afbc_min_size(struct drm_device *dev,
+                                const struct drm_mode_fb_cmd2 *mode_cmd,
+                                struct drm_afbc_framebuffer *afbc_fb)
+{
+       __u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
+       /* remove bpp when all users properly encode cpp in drm_format_info */
+       __u32 bpp;
+
+       switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+               afbc_fb->block_width = 16;
+               afbc_fb->block_height = 16;
+               break;
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+               afbc_fb->block_width = 32;
+               afbc_fb->block_height = 8;
+               break;
+       /* no user exists yet - fall through */
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+       default:
+               drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
+                           mode_cmd->modifier[0]
+                           & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+               return -EINVAL;
+       }
+
+       /* tiled header afbc */
+       w_alignment = afbc_fb->block_width;
+       h_alignment = afbc_fb->block_height;
+       hdr_alignment = AFBC_HDR_ALIGN;
+       if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
+               w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+               h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+               hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
+       }
+
+       afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
+       afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
+       afbc_fb->offset = mode_cmd->offsets[0];
+
+       bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+       if (!bpp) {
+               drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
+               return -EINVAL;
+       }
+
+       n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
+                  / AFBC_SUPERBLOCK_PIXELS;
+       afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
+       afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
+                                              AFBC_SUPERBLOCK_ALIGNMENT);
+
+       return 0;
+}
+
+/**
+ * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
+ *                         fill and validate all the afbc-specific
+ *                         struct drm_afbc_framebuffer members
+ *
+ * @dev: DRM device
+ * @afbc_fb: afbc-specific framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @afbc_fb: afbc framebuffer
+ *
+ * This function can be used by drivers which support afbc to complete
+ * the preparation of struct drm_afbc_framebuffer. It must be called after
+ * allocating the said struct and calling drm_gem_fb_init_with_funcs().
+ * It is caller's responsibility to put afbc_fb->base.obj objects in case
+ * the call is unsuccessful.
+ *
+ * Returns:
+ * Zero on success or a negative error value on failure.
+ */
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+                        const struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct drm_afbc_framebuffer *afbc_fb)
+{
+       const struct drm_format_info *info;
+       struct drm_gem_object **objs;
+       int ret;
+
+       objs = afbc_fb->base.obj;
+       info = drm_get_format_info(dev, mode_cmd);
+       if (!info)
+               return -EINVAL;
+
+       ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+       if (ret < 0)
+               return ret;
+
+       if (objs[0]->size < afbc_fb->afbc_size)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
+
 /**
  * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
  * @plane: Plane
index 92a11bb42365da6d2793a8e2d673bae7446b8cd0..8b2d5c945c95cf4f5772532ef74bf71ad760d81c 100644 (file)
@@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <linux/module.h>
+
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_mode.h>
@@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 /**
  * DOC: overview
  *
- * This library provides a GEM buffer object that is backed by video RAM
- * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM (VRAM). It can be used for
+ * framebuffer devices with dedicated memory.
  *
  * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. The rsp.
- * buffer object is provided by &struct drm_gem_vram_object.
+ * manager for simple framebuffer devices with dedicated video memory. GEM
+ * VRAM buffer objects are either placed in the video memory or remain evicted
+ * to system memory.
+ *
+ * With the GEM interface userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffers are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ *     struct file_operations fops = {
+ *             .owner = THIS_MODULE,
+ *             DRM_VRAM_MM_FILE_OPERATIONS
+ *     };
+ *     struct drm_driver drv = {
+ *             .driver_feature = DRM_ ... ,
+ *             .fops = &fops,
+ *             DRM_GEM_VRAM_DRIVER
+ *     };
+ *
+ *     int init_drm_driver()
+ *     {
+ *             struct drm_device *dev;
+ *             uint64_t vram_base;
+ *             unsigned long vram_size;
+ *             int ret;
+ *
+ *             // setup device, vram base and size
+ *             // ...
+ *
+ *             ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ *             if (ret)
+ *                     return ret;
+ *             return 0;
+ *     }
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ *     void fini_drm_driver()
+ *     {
+ *             struct drm_device *dev = ...;
+ *
+ *             drm_vram_helper_release_mm(dev);
+ *     }
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, set
+ * the cursor overlay's image for a mouse cursor, or use it as input to the
+ * hardware's drawing engine.
+ *
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
  */
 
 /*
@@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
  * @plane:     a DRM plane
  * @new_state: the plane's new state
  *
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ * During plane updates, this function sets the plane's fence and
+ * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
  *
  * Returns:
  *     0 on success, or
@@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
                        goto err_drm_gem_vram_unpin;
        }
 
+       ret = drm_gem_fb_prepare_fb(plane, new_state);
+       if (ret)
+               goto err_drm_gem_vram_unpin;
+
        return 0;
 
 err_drm_gem_vram_unpin:
@@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = {
  * struct drm_vram_mm
  */
 
-#if defined(CONFIG_DEBUG_FS)
 static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
 static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
        { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
 };
-#endif
 
 /**
  * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
  *
  * @minor: drm minor device.
  *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
  */
-int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+void drm_vram_mm_debugfs_init(struct drm_minor *minor)
 {
-       int ret = 0;
-
-#if defined(CONFIG_DEBUG_FS)
-       ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
-                                      ARRAY_SIZE(drm_vram_mm_debugfs_list),
-                                      minor->debugfs_root, minor);
-#endif
-       return ret;
+       drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+                                ARRAY_SIZE(drm_vram_mm_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
 
@@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev,
        return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
 }
 EXPORT_SYMBOL(drm_vram_helper_mode_valid);
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
index 5714a78365ac6e14c716fa0836acad2a18b3f3a3..2470a352730b505e2698030bf6aee3734f5a33b0 100644 (file)
@@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
 
+/* drm_managed.c */
+void drm_managed_release(struct drm_device *dev);
+
 /* drm_vblank.c */
 void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
-void drm_vblank_cleanup(struct drm_device *dev);
 
 /* IOCTLS */
 int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
@@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 /* drm_gem.c */
 struct drm_gem_object;
 int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
                               struct drm_gem_object *obj,
                               u32 *handlep);
@@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 /* drm_framebuffer.c */
 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
                                const struct drm_framebuffer *fb);
-int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+void drm_framebuffer_debugfs_init(struct drm_minor *minor);
index 9e41972c4bbc3ddd7c4b3c9083a3575e9f380f08..73e31dd4e442ac7130bb6f2be6ad37598bbf2576 100644 (file)
@@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
 
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
new file mode 100644 (file)
index 0000000..9cebfe3
--- /dev/null
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel
+ *
+ * Based on drivers/base/devres.c
+ */
+
+#include <drm/drm_managed.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: managed resources
+ *
+ * Inspired by struct &device managed resources, but tied to the lifetime of
+ * struct &drm_device, which can outlive the underlying physical device, usually
+ * when userspace has some open files and other handles to resources still open.
+ *
+ * Release actions can be added with drmm_add_action(), memory allocations can
+ * be done directly with drmm_kmalloc() and the related functions. Everything
+ * will be released on the final drm_dev_put() in reverse order of how the
+ * release actions have been added and memory has been allocated since driver
+ * loading started with drm_dev_init().
+ *
+ * Note that release actions and managed memory can also be added and removed
+ * during the lifetime of the driver, all the functions are fully concurrent
+ * safe. But it is recommended to use managed resources only for resources that
+ * change rarely, if ever, during the lifetime of the &drm_device instance.
+ */
+
+struct drmres_node {
+       struct list_head        entry;
+       drmres_release_t        release;
+       const char              *name;
+       size_t                  size;
+};
+
+struct drmres {
+       struct drmres_node              node;
+       /*
+        * Some archs want to perform DMA into kmalloc caches
+        * and need a guaranteed alignment larger than
+        * the alignment of a 64-bit integer.
+        * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+        * buffer alignment as if it was allocated by plain kmalloc().
+        */
+       u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+static void free_dr(struct drmres *dr)
+{
+       kfree_const(dr->node.name);
+       kfree(dr);
+}
+
+void drm_managed_release(struct drm_device *dev)
+{
+       struct drmres *dr, *tmp;
+
+       drm_dbg_drmres(dev, "drmres release begin\n");
+       list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
+               drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
+                              dr, dr->node.name, dr->node.size);
+
+               if (dr->node.release)
+                       dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
+
+               list_del(&dr->node.entry);
+               free_dr(dr);
+       }
+       drm_dbg_drmres(dev, "drmres release end\n");
+}
+
+/*
+ * Always inline so that kmalloc_track_caller tracks the actual interesting
+ * caller outside of drm_managed.c.
+ */
+static __always_inline struct drmres * alloc_dr(drmres_release_t release,
+                                               size_t size, gfp_t gfp, int nid)
+{
+       size_t tot_size;
+       struct drmres *dr;
+
+       /* We must catch any near-SIZE_MAX cases that could overflow. */
+       if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
+               return NULL;
+
+       dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+       if (unlikely(!dr))
+               return NULL;
+
+       memset(dr, 0, offsetof(struct drmres, data));
+
+       INIT_LIST_HEAD(&dr->node.entry);
+       dr->node.release = release;
+       dr->node.size = size;
+
+       return dr;
+}
+
+static void del_dr(struct drm_device *dev, struct drmres *dr)
+{
+       list_del_init(&dr->node.entry);
+
+       drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
+                      dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+static void add_dr(struct drm_device *dev, struct drmres *dr)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->managed.lock, flags);
+       list_add(&dr->node.entry, &dev->managed.resources);
+       spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+       drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
+                      dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+/**
+ * drmm_add_final_kfree - add release action for the final kfree()
+ * @dev: DRM device
+ * @container: pointer to the kmalloc allocation containing @dev
+ *
+ * Since the allocation containing the struct &drm_device must be allocated
+ * before it can be initialized with drm_dev_init() there's no way to allocate
+ * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
+ * pointer for this final kfree() must be specified by calling this function. It
+ * will be released in the final drm_dev_put() for @dev, after all other release
+ * actions installed through drmm_add_action() have been processed.
+ */
+void drmm_add_final_kfree(struct drm_device *dev, void *container)
+{
+       WARN_ON(dev->managed.final_kfree);
+       WARN_ON(dev < (struct drm_device *) container);
+       WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
+       dev->managed.final_kfree = container;
+}
+EXPORT_SYMBOL(drmm_add_final_kfree);
+
+int __drmm_add_action(struct drm_device *dev,
+                     drmres_release_t action,
+                     void *data, const char *name)
+{
+       struct drmres *dr;
+       void **void_ptr;
+
+       dr = alloc_dr(action, data ? sizeof(void*) : 0,
+                     GFP_KERNEL | __GFP_ZERO,
+                     dev_to_node(dev->dev));
+       if (!dr) {
+               drm_dbg_drmres(dev, "failed to add action %s for %p\n",
+                              name, data);
+               return -ENOMEM;
+       }
+
+       dr->node.name = kstrdup_const(name, GFP_KERNEL);
+       if (data) {
+               void_ptr = (void **)&dr->data;
+               *void_ptr = data;
+       }
+
+       add_dr(dev, dr);
+
+       return 0;
+}
+EXPORT_SYMBOL(__drmm_add_action);
+
+int __drmm_add_action_or_reset(struct drm_device *dev,
+                              drmres_release_t action,
+                              void *data, const char *name)
+{
+       int ret;
+
+       ret = __drmm_add_action(dev, action, data, name);
+       if (ret)
+               action(dev, data);
+
+       return ret;
+}
+EXPORT_SYMBOL(__drmm_add_action_or_reset);
+
+/**
+ * drmm_kmalloc - &drm_device managed kmalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+       struct drmres *dr;
+
+       dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
+       if (!dr) {
+               drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
+                              size, gfp);
+               return NULL;
+       }
+       dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+
+       add_dr(dev, dr);
+
+       return dr->data;
+}
+EXPORT_SYMBOL(drmm_kmalloc);
+
+/**
+ * drmm_kstrdup - &drm_device managed kstrdup()
+ * @dev: DRM device
+ * @s: 0-terminated string to be duplicated
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kstrdup(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
+{
+       size_t size;
+       char *buf;
+
+       if (!s)
+               return NULL;
+
+       size = strlen(s) + 1;
+       buf = drmm_kmalloc(dev, size, gfp);
+       if (buf)
+               memcpy(buf, s, size);
+       return buf;
+}
+EXPORT_SYMBOL_GPL(drmm_kstrdup);
+
+/**
+ * drmm_kfree - &drm_device managed kfree()
+ * @dev: DRM device
+ * @data: memory allocation to be freed
+ *
+ * This is a &drm_device managed version of kfree() which can be used to
+ * release memory allocated through drmm_kmalloc() or any of its related
+ * functions before the final drm_dev_put() of @dev.
+ */
+void drmm_kfree(struct drm_device *dev, void *data)
+{
+       struct drmres *dr_match = NULL, *dr;
+       unsigned long flags;
+
+       if (!data)
+               return;
+
+       spin_lock_irqsave(&dev->managed.lock, flags);
+       list_for_each_entry(dr, &dev->managed.resources, node.entry) {
+               if (dr->data == data) {
+                       dr_match = dr;
+                       del_dr(dev, dr_match);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+       if (WARN_ON(!dr_match))
+               return;
+
+       free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_kfree);
index 558baf989f5a8432871342c399c6974620485d1a..bb27c82757f17caa3b29c58b75438b518eaf3d6e 100644 (file)
@@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
 EXPORT_SYMBOL(mipi_dbi_command_buf);
 
 /* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+                             size_t len)
 {
        u8 *buf;
        int ret;
@@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
        if (!dbidev->dbi.command)
                return -EINVAL;
 
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
+
        dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
        if (!dbidev->tx_buf)
                return -ENOMEM;
@@ -578,26 +583,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
 }
 EXPORT_SYMBOL(mipi_dbi_dev_init);
 
-/**
- * mipi_dbi_release - DRM driver release helper
- * @drm: DRM device
- *
- * This function finalizes and frees &mipi_dbi.
- *
- * Drivers can use this as their &drm_driver->release callback.
- */
-void mipi_dbi_release(struct drm_device *drm)
-{
-       struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
-
-       DRM_DEBUG_DRIVER("\n");
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(dbidev);
-}
-EXPORT_SYMBOL(mipi_dbi_release);
-
 /**
  * mipi_dbi_hw_reset - Hardware reset of controller
  * @dbi: MIPI DBI structure
@@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
  * controller or getting the read command values.
  * Drivers can use this as their &drm_driver->debugfs_init callback.
  *
- * Returns:
- * Zero on success, negative error code on failure.
  */
-int mipi_dbi_debugfs_init(struct drm_minor *minor)
+void mipi_dbi_debugfs_init(struct drm_minor *minor)
 {
        struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
        umode_t mode = S_IFREG | S_IWUSR;
@@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
                mode |= S_IRUGO;
        debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
                            &mipi_dbi_debugfs_command_fops);
-
-       return 0;
 }
 EXPORT_SYMBOL(mipi_dbi_debugfs_init);
 
index 8981abe8b7c940e82fe33fa1085994035dd0daf4..f4ca1ff80af9f23ff2ada4dd545917014aa32e74 100644 (file)
@@ -212,20 +212,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                   &drm_mm_interval_tree_augment);
 }
 
-#define RB_INSERT(root, member, expr) do { \
-       struct rb_node **link = &root.rb_node, *rb = NULL; \
-       u64 x = expr(node); \
-       while (*link) { \
-               rb = *link; \
-               if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
-                       link = &rb->rb_left; \
-               else \
-                       link = &rb->rb_right; \
-       } \
-       rb_link_node(&node->member, rb, link); \
-       rb_insert_color(&node->member, &root); \
-} while (0)
-
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
@@ -255,16 +241,42 @@ static void insert_hole_size(struct rb_root_cached *root,
        rb_insert_color_cached(&node->rb_hole_size, root, first);
 }
 
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+                        struct drm_mm_node, rb_hole_addr,
+                        u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+       struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+       u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+       struct drm_mm_node *parent;
+
+       while (*link) {
+               rb_parent = *link;
+               parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+               if (parent->subtree_max_hole < subtree_max_hole)
+                       parent->subtree_max_hole = subtree_max_hole;
+               if (start < HOLE_ADDR(parent))
+                       link = &parent->rb_hole_addr.rb_left;
+               else
+                       link = &parent->rb_hole_addr.rb_right;
+       }
+
+       rb_link_node(&node->rb_hole_addr, rb_parent, link);
+       rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
 static void add_hole(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
 
        node->hole_size =
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+       node->subtree_max_hole = node->hole_size;
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
        insert_hole_size(&mm->holes_size, node);
-       RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+       insert_hole_addr(&mm->holes_addr, node);
 
        list_add(&node->hole_stack, &mm->hole_stack);
 }
@@ -275,8 +287,10 @@ static void rm_hole(struct drm_mm_node *node)
 
        list_del(&node->hole_stack);
        rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
-       rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+       rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+                          &augment_callbacks);
        node->hole_size = 0;
+       node->subtree_max_hole = 0;
 
        DRM_MM_BUG_ON(drm_mm_hole_follows(node));
 }
@@ -361,9 +375,90 @@ first_hole(struct drm_mm *mm,
        }
 }
 
+/**
+ * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of a hole needed for the request
+ *
+ * This function will verify whether left subtree of @entry has hole big enough
+ * to fit the requested size. If so, it will return the previous node of @entry
+ * else it will return parent node of @entry
+ *
+ * It will also skip the complete left subtree if subtree_max_hole of that
+ * subtree is same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * previous node of @entry if left subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_high_addr(struct drm_mm_node *entry, u64 size)
+{
+       struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
+       struct drm_mm_node *left_node;
+
+       if (!entry)
+               return NULL;
+
+       rb_node = &entry->rb_hole_addr;
+       if (rb_node->rb_left) {
+               left_rb_node = rb_node->rb_left;
+               parent_rb_node = rb_parent(rb_node);
+               left_node = rb_entry(left_rb_node,
+                                    struct drm_mm_node, rb_hole_addr);
+               if ((left_node->subtree_max_hole < size ||
+                    entry->size == entry->subtree_max_hole) &&
+                   parent_rb_node && parent_rb_node->rb_left != rb_node)
+                       return rb_hole_addr_to_node(parent_rb_node);
+       }
+
+       return rb_hole_addr_to_node(rb_prev(rb_node));
+}
+
+/**
+ * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of a hole needed for the request
+ *
+ * This function will verify whether right subtree of @entry has hole big enough
+ * to fit the requested size. If so, it will return the next node of @entry or
+ * else it will return parent node of @entry
+ *
+ * It will also skip the complete right subtree if subtree_max_hole of that
+ * subtree is same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * next node of @entry if right subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_low_addr(struct drm_mm_node *entry, u64 size)
+{
+       struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
+       struct drm_mm_node *right_node;
+
+       if (!entry)
+               return NULL;
+
+       rb_node = &entry->rb_hole_addr;
+       if (rb_node->rb_right) {
+               right_rb_node = rb_node->rb_right;
+               parent_rb_node = rb_parent(rb_node);
+               right_node = rb_entry(right_rb_node,
+                                     struct drm_mm_node, rb_hole_addr);
+               if ((right_node->subtree_max_hole < size ||
+                    entry->size == entry->subtree_max_hole) &&
+                   parent_rb_node && parent_rb_node->rb_right != rb_node)
+                       return rb_hole_addr_to_node(parent_rb_node);
+       }
+
+       return rb_hole_addr_to_node(rb_next(rb_node));
+}
+
 static struct drm_mm_node *
 next_hole(struct drm_mm *mm,
          struct drm_mm_node *node,
+         u64 size,
          enum drm_mm_insert_mode mode)
 {
        switch (mode) {
@@ -372,10 +467,10 @@ next_hole(struct drm_mm *mm,
                return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
        case DRM_MM_INSERT_LOW:
-               return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+               return next_hole_low_addr(node, size);
 
        case DRM_MM_INSERT_HIGH:
-               return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+               return next_hole_high_addr(node, size);
 
        case DRM_MM_INSERT_EVICT:
                node = list_next_entry(node, hole_stack);
@@ -489,7 +584,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        for (hole = first_hole(mm, range_start, range_end, size, mode);
             hole;
-            hole = once ? NULL : next_hole(mm, hole, mode)) {
+            hole = once ? NULL : next_hole(mm, hole, size, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
index 08e6eff6a1797b2e0686af7610f061b587ca419d..5761f838a057611a7865e28f8358714c01e87562 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_file.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drm_print.h>
 #include <linux/dma-resv.h>
@@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
        return 0;
 }
 
+static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+{
+       drm_mode_config_cleanup(dev);
+}
+
 /**
- * drm_mode_config_init - initialize DRM mode_configuration structure
+ * drmm_mode_config_init - managed DRM mode_configuration structure
+ *     initialization
  * @dev: DRM device
  *
  * Initialize @dev's mode_config structure, used for tracking the graphics
@@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
  * problem, since this should happen single threaded at init time. It is the
  * driver's problem to ensure this guarantee.
  *
+ * Cleanup is automatically handled through registering drm_mode_config_cleanup
+ * with drmm_add_action().
+ *
+ * Returns: 0 on success, negative error value on failure.
  */
-void drm_mode_config_init(struct drm_device *dev)
+int drmm_mode_config_init(struct drm_device *dev)
 {
        mutex_init(&dev->mode_config.mutex);
        drm_modeset_lock_init(&dev->mode_config.connection_mutex);
@@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev)
                drm_modeset_acquire_fini(&modeset_ctx);
                dma_resv_fini(&resv);
        }
+
+       return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
+                                       NULL);
 }
-EXPORT_SYMBOL(drm_mode_config_init);
+EXPORT_SYMBOL(drmm_mode_config_init);
 
 /**
  * drm_mode_config_cleanup - free up DRM mode_config info
@@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init);
  * Note that since this /should/ happen single-threaded at driver/device
  * teardown time, no locking is required. It's the driver's job to ensure that
  * this guarantee actually holds true.
+ *
+ * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
+ * drivers to explicitly call this function.
  */
 void drm_mode_config_cleanup(struct drm_device *dev)
 {
@@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev)
        drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+static u32 full_encoder_mask(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+
+       drm_for_each_encoder(encoder, dev)
+               encoder_mask |= drm_encoder_mask(encoder);
+
+       return encoder_mask;
+}
+
+/*
+ * For some reason we want the encoder itself included in
+ * possible_clones. Make life easy for drivers by allowing them
+ * to leave possible_clones unset if no cloning is possible.
+ */
+static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
+{
+       if (encoder->possible_clones == 0)
+               encoder->possible_clones = drm_encoder_mask(encoder);
+}
+
+static void validate_encoder_possible_clones(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       u32 encoder_mask = full_encoder_mask(dev);
+       struct drm_encoder *other;
+
+       drm_for_each_encoder(other, dev) {
+               WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
+                    !!(other->possible_clones & drm_encoder_mask(encoder)),
+                    "possible_clones mismatch: "
+                    "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
+                    "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
+                    encoder->base.id, encoder->name,
+                    drm_encoder_mask(encoder), encoder->possible_clones,
+                    other->base.id, other->name,
+                    drm_encoder_mask(other), other->possible_clones);
+       }
+
+       WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
+            (encoder->possible_clones & ~encoder_mask) != 0,
+            "Bogus possible_clones: "
+            "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
+            encoder->base.id, encoder->name,
+            encoder->possible_clones, encoder_mask);
+}
+
+static u32 full_crtc_mask(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       u32 crtc_mask = 0;
+
+       drm_for_each_crtc(crtc, dev)
+               crtc_mask |= drm_crtc_mask(crtc);
+
+       return crtc_mask;
+}
+
+static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
+{
+       u32 crtc_mask = full_crtc_mask(encoder->dev);
+
+       WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
+            (encoder->possible_crtcs & ~crtc_mask) != 0,
+            "Bogus possible_crtcs: "
+            "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
+            encoder->base.id, encoder->name,
+            encoder->possible_crtcs, crtc_mask);
+}
+
+void drm_mode_config_validate(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       drm_for_each_encoder(encoder, dev)
+               fixup_encoder_possible_clones(encoder);
+
+       drm_for_each_encoder(encoder, dev) {
+               validate_encoder_possible_clones(encoder);
+               validate_encoder_possible_crtcs(encoder);
+       }
+}
index 35c2719407a828d00f688fa7e2f336f38e3dabcf..901b078abf40c5d7bf1819e6f6cd4ac256c4f774 100644 (file)
@@ -402,12 +402,13 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_mode_obj_get_properties *arg = data;
        struct drm_mode_object *obj;
+       struct drm_modeset_acquire_ctx ctx;
        int ret = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EOPNOTSUPP;
 
-       drm_modeset_lock_all(dev);
+       DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
        obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
        if (!obj) {
@@ -427,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 out_unref:
        drm_mode_object_put(obj);
 out:
-       drm_modeset_unlock_all(dev);
+       DRM_MODESET_LOCK_ALL_END(ctx, ret);
        return ret;
 }
 
@@ -449,12 +450,13 @@ static int set_property_legacy(struct drm_mode_object *obj,
 {
        struct drm_device *dev = prop->dev;
        struct drm_mode_object *ref;
+       struct drm_modeset_acquire_ctx ctx;
        int ret = -EINVAL;
 
        if (!drm_property_change_valid_get(prop, prop_value, &ref))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
+       DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
        switch (obj->type) {
        case DRM_MODE_OBJECT_CONNECTOR:
                ret = drm_connector_set_obj_prop(obj, prop, prop_value);
@@ -468,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
                break;
        }
        drm_property_change_valid_put(prop, ref);
-       drm_modeset_unlock_all(dev);
+       DRM_MODESET_LOCK_ALL_END(ctx, ret);
 
        return ret;
 }
index d4d64518e11b8fc06f45eddf2673a8c6ba0bfe34..fec1c33b30456ab33cb2fa8f45b61f050b6c8fcc 100644 (file)
@@ -747,32 +747,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_set_name);
 
-/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * Returns:
- * @modes's hsync rate in kHz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
- */
-int drm_mode_hsync(const struct drm_display_mode *mode)
-{
-       unsigned int calc_val;
-
-       if (mode->hsync)
-               return mode->hsync;
-
-       if (mode->htotal <= 0)
-               return 0;
-
-       calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
-       calc_val += 500;                                /* round to 1000Hz */
-       calc_val /= 1000;                               /* truncate to kHz */
-
-       return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
index 81aa215619821ab94b86c5207130248ebc5f94a0..75e2b7053f353b818f770c1a728413e02fe2f576 100644 (file)
 #include <drm/drm.h>
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_internal.h"
 #include "drm_legacy.h"
 
+#ifdef CONFIG_DRM_LEGACY
+
 /**
  * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
  * @dev: DRM device
@@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 }
 
 EXPORT_SYMBOL(drm_pci_free);
+#endif
 
 static int drm_get_pci_domain(struct drm_device *dev)
 {
index d6ad60ab0d389a3f83d073ffaca2908d3af48c24..4af173ced32772e1b23ca173ec983cf3fbeb1ae6 100644 (file)
@@ -289,6 +289,8 @@ EXPORT_SYMBOL(drm_universal_plane_init);
 
 int drm_plane_register_all(struct drm_device *dev)
 {
+       unsigned int num_planes = 0;
+       unsigned int num_zpos = 0;
        struct drm_plane *plane;
        int ret = 0;
 
@@ -297,8 +299,15 @@ int drm_plane_register_all(struct drm_device *dev)
                        ret = plane->funcs->late_register(plane);
                if (ret)
                        return ret;
+
+               if (plane->zpos_property)
+                       num_zpos++;
+               num_planes++;
        }
 
+       drm_WARN(dev, num_zpos && num_planes != num_zpos,
+                "Mixing planes with and without zpos property is invalid\n");
+
        return 0;
 }
 
index da7b0b0c1090de60bc4baa2f5b4b03e0466cdf0c..758bf74e1cabef7195d80ca02b4fe3f38c896871 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
 /**
  * DOC: vblank handling
  *
+ * From the computer's perspective, every time the monitor displays
+ * a new frame the scanout engine has "scanned out" the display image
+ * from top to bottom, one row of pixels at a time. The current row
+ * of pixels is referred to as the current scanline.
+ *
+ * In addition to the display's visible area, there are usually a couple of
+ * extra scanlines which aren't actually displayed on the screen.
+ * These extra scanlines don't contain image data and are occasionally used
+ * for features like audio and infoframes. The region made up of these
+ * scanlines is referred to as the vertical blanking region, or vblank for
+ * short.
+ *
+ * For historical reference, the vertical blanking period was designed to
+ * give the electron gun (on CRTs) enough time to move back to the top of
+ * the screen to start scanning out the next frame. Similar for horizontal
+ * blanking periods. They were designed to give the electron gun enough
+ * time to move back to the other side of the screen to start scanning the
+ * next scanline.
+ *
+ * ::
+ *
+ *
+ *    physical →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *    top of      |                                        |
+ *    display     |                                        |
+ *                |               New frame                |
+ *                |                                        |
+ *                |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
+ *                |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
+ *                |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|   updates the
+ *                |                                        |   frame as it
+ *                |                                        |   travels down
+ *                |                                        |   ("scan out")
+ *                |               Old frame                |
+ *                |                                        |
+ *                |                                        |
+ *                |                                        |
+ *                |                                        |   physical
+ *                |                                        |   bottom of
+ *    vertical    |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
+ *    blanking    ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *    region   →  ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *                ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *    start of →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *    new frame
+ *
+ * "Physical top of display" is the reference point for the high-precision/
+ * corrected timestamp.
+ *
+ * On a lot of display hardware, programming needs to take effect during the
+ * vertical blanking period so that settings like gamma, the image buffer
+ * to be scanned out, etc. can safely be changed without showing
+ * any visual artifacts on the screen. In some unforgiving hardware, some of
+ * this programming has to both start and end in the same vblank. To help
+ * with the timing of the hardware programming, an interrupt is usually
+ * available to notify the driver when it can start the updating of registers.
+ * The interrupt is in this context named the vblank interrupt.
+ *
+ * The vblank interrupt may be fired at different points depending on the
+ * hardware. Some hardware implementations will fire the interrupt when the
+ * new frame starts, other implementations will fire the interrupt at different
+ * points in time.
+ *
  * Vertical blanking plays a major role in graphics rendering. To achieve
  * tear-free display, users must synchronize page flips and/or rendering to
  * vertical blanking. The DRM API offers ioctls to perform page flips
@@ -425,14 +489,10 @@ static void vblank_disable_fn(struct timer_list *t)
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 
-void drm_vblank_cleanup(struct drm_device *dev)
+static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
 {
        unsigned int pipe;
 
-       /* Bail if the driver didn't call drm_vblank_init() */
-       if (dev->num_crtcs == 0)
-               return;
-
        for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
                struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
@@ -441,10 +501,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
                del_timer_sync(&vblank->disable_timer);
        }
-
-       kfree(dev->vblank);
-
-       dev->num_crtcs = 0;
 }
 
 /**
@@ -453,25 +509,29 @@ void drm_vblank_cleanup(struct drm_device *dev)
  * @num_crtcs: number of CRTCs supported by @dev
  *
  * This function initializes vblank support for @num_crtcs display pipelines.
- * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
- * drivers with a &drm_driver.release callback.
+ * Cleanup is handled automatically through a cleanup function added with
+ * drmm_add_action().
  *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
 int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 {
-       int ret = -ENOMEM;
+       int ret;
        unsigned int i;
 
        spin_lock_init(&dev->vbl_lock);
        spin_lock_init(&dev->vblank_time_lock);
 
+       dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+       if (!dev->vblank)
+               return -ENOMEM;
+
        dev->num_crtcs = num_crtcs;
 
-       dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
-       if (!dev->vblank)
-               goto err;
+       ret = drmm_add_action(dev, drm_vblank_init_release, NULL);
+       if (ret)
+               return ret;
 
        for (i = 0; i < num_crtcs; i++) {
                struct drm_vblank_crtc *vblank = &dev->vblank[i];
@@ -486,10 +546,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
        DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
        return 0;
-
-err:
-       dev->num_crtcs = 0;
-       return ret;
 }
 EXPORT_SYMBOL(drm_vblank_init);
 
index aa88911bbc06d48aea6c664a25f3bbb47deb1115..56197ae0b2f96a9812890489bf82a1a01148571a 100644 (file)
@@ -595,8 +595,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
+               fallthrough;    /* to _DRM_FRAME_BUFFER... */
 #endif
-               /* fall through - to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
@@ -621,7 +621,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
-               /* fall through - to _DRM_SHM */
+               fallthrough;    /* to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
deleted file mode 100644 (file)
index 2000d9b..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
- * buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM is managed
- * by &struct drm_vram_mm (VRAM MM).
- *
- * With the GEM interface userspace applications create, manage and destroy
- * graphics buffers, such as an on-screen framebuffer. GEM does not provide
- * an implementation of these interfaces. It's up to the DRM driver to
- * provide an implementation that suits the hardware. If the hardware device
- * contains dedicated video memory, the DRM driver can use the VRAM helper
- * library. Each active buffer object is stored in video RAM. Active
- * buffer are used for drawing the current frame, typically something like
- * the frame's scanout buffer or the cursor image. If there's no more space
- * left in VRAM, inactive GEM objects can be moved to system memory.
- *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
- *
- * .. code-block:: c
- *
- *     struct file_operations fops ={
- *             .owner = THIS_MODULE,
- *             DRM_VRAM_MM_FILE_OPERATION
- *     };
- *     struct drm_driver drv = {
- *             .driver_feature = DRM_ ... ,
- *             .fops = &fops,
- *             DRM_GEM_VRAM_DRIVER
- *     };
- *
- *     int init_drm_driver()
- *     {
- *             struct drm_device *dev;
- *             uint64_t vram_base;
- *             unsigned long vram_size;
- *             int ret;
- *
- *             // setup device, vram base and size
- *             // ...
- *
- *             ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
- *             if (ret)
- *                     return ret;
- *             return 0;
- *     }
- *
- * This creates an instance of &struct drm_vram_mm, exports DRM userspace
- * interfaces for GEM buffer management and initializes file operations to
- * allow for accessing created GEM buffers. With this setup, the DRM driver
- * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
- * to userspace.
- *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
- *
- * .. code-block:: c
- *
- *     void fini_drm_driver()
- *     {
- *             struct drm_device *dev = ...;
- *
- *             drm_vram_helper_release_mm(dev);
- *     }
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
- * A buffer object that is pinned in video RAM has a fixed address within that
- * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
- * it's used to program the hardware's scanout engine for framebuffers, set
- * the cursor overlay's image for a mouse cursor, or use it as input to the
- * hardware's draing engine.
- *
- * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
- * release the mapping.
- */
-
-MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
-MODULE_LICENSE("GPL");
index a8685b2e1803cf91318fd2b50960915710426353..27c948f5dfeb7ea0baf55339a19aa2727b35f50f 100644 (file)
@@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = {
                {"ring", show_each_gpu, 0, etnaviv_ring_show},
 };
 
-static int etnaviv_debugfs_init(struct drm_minor *minor)
+static void etnaviv_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(etnaviv_debugfs_list,
-                       ARRAY_SIZE(etnaviv_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
+       drm_debugfs_create_files(etnaviv_debugfs_list,
+                                ARRAY_SIZE(etnaviv_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 5ee090691390f3f967c35d63ac41f85e96b0da8b..9ac51b6ab34b1dfa302efdc7c94af344fb49f759 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_crtc.h"
@@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
        .disable = exynos_dp_nop,
 };
 
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
 {
        int ret;
@@ -167,8 +164,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
                        return ret;
        }
 
-       drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
 
index 43fa0f26c052e97bc87248660d827e48c0ac14d8..7ba5354e7d9446f97d094eb8112cb33d7f1fe197 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <video/of_videomode.h>
 #include <video/videomode.h>
@@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
        .disable = exynos_dpi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 enum {
        FIMD_PORT_IN0,
        FIMD_PORT_IN1,
@@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
 {
        int ret;
 
-       drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
 
index 57defeb445223b533df7e38b144182b733a78c11..dbd80f1e4c78bd44118acdc20449f7a4f82bd624 100644 (file)
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
-       .fault = exynos_drm_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
 };
index e080aa92338c060252b985484067405e23e2c7d6..ee96a95fb6be50824d3483530d5e4ae14135d1df 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 
 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
 
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
        "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
        "sclk_rgb_vclk_to_dsim0" };
 
@@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
        .disable = exynos_dsi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
 
 static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
@@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
        struct drm_bridge *in_bridge;
        int ret;
 
-       drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
 
@@ -1763,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        dsi->dev = dev;
        dsi->driver_data = of_device_get_match_data(dev);
 
-       ret = exynos_dsi_parse_dt(dsi);
-       if (ret)
-               return ret;
-
        dsi->supplies[0].supply = "vddcore";
        dsi->supplies[1].supply = "vddio";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1813,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        }
 
        dsi->irq = platform_get_irq(pdev, 0);
-       if (dsi->irq < 0) {
-               dev_err(dev, "failed to request dsi irq resource\n");
+       if (dsi->irq < 0)
                return dsi->irq;
-       }
 
        irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
        ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1827,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = exynos_dsi_parse_dt(dsi);
+       if (ret)
+               return ret;
+
        platform_set_drvdata(pdev, &dsi->encoder);
 
        pm_runtime_enable(dev);
 
-       return component_add(dev, &exynos_dsi_component_ops);
+       ret = component_add(dev, &exynos_dsi_component_ops);
+       if (ret)
+               goto err_disable_runtime;
+
+       return 0;
+
+err_disable_runtime:
+       pm_runtime_disable(dev);
+       of_node_put(dsi->in_bridge_node);
+
+       return ret;
 }
 
 static int exynos_dsi_remove(struct platform_device *pdev)
index e6ceaf36fb044ab2081b871b83f5b7528cc348a2..56a2b47e1af79fd254e992abdd4d256fbd0dbe3e 100644 (file)
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        struct fb_info *fbi;
        struct drm_framebuffer *fb = helper->fb;
        unsigned int size = fb->width * fb->height * fb->format->cpp[0];
-       unsigned int nr_pages;
        unsigned long offset;
 
        fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
        drm_fb_helper_fill_info(fbi, helper, sizes);
 
-       nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-       exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-       if (!exynos_gem->kvaddr) {
-               DRM_DEV_ERROR(to_dma_dev(helper->dev),
-                             "failed to map pages to kernel space.\n");
-               return -EIO;
-       }
-
        offset = fbi->var.xoffset * fb->format->cpp[0];
        offset += fbi->var.yoffset * fb->pitches[0];
 
@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        size = mode_cmd.pitches[0] * mode_cmd.height;
 
-       exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
-       /*
-        * If physically contiguous memory allocation fails and if IOMMU is
-        * supported then try to get buffer from non physically contiguous
-        * memory area.
-        */
-       if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
-               dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-               exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-                                                  size);
-       }
-
+       exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);
 
@@ -229,12 +207,8 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
-       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-       struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
        struct drm_framebuffer *fb;
 
-       vunmap(exynos_gem->kvaddr);
-
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
                fb = fb_helper->fb;
index d734d9d51762f8923685c76ab67ad135637eb4b6..0df57ee341441cb2a57dbe8007d2d3573d941c33 100644 (file)
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 {
        struct drm_device *dev = exynos_gem->base.dev;
-       unsigned long attr;
-       unsigned int nr_pages;
-       struct sg_table sgt;
-       int ret = -ENOMEM;
+       unsigned long attr = 0;
 
        if (exynos_gem->dma_addr) {
                DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
                return 0;
        }
 
-       exynos_gem->dma_attrs = 0;
-
        /*
         * if EXYNOS_BO_CONTIG, fully physically contiguous memory
         * region will be allocated else physically contiguous
         * as possible.
         */
        if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-               exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+               attr |= DMA_ATTR_FORCE_CONTIGUOUS;
 
        /*
         * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
         */
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-               attr = DMA_ATTR_WRITE_COMBINE;
+               attr |= DMA_ATTR_WRITE_COMBINE;
        else
-               attr = DMA_ATTR_NON_CONSISTENT;
-
-       exynos_gem->dma_attrs |= attr;
-       exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+               attr |= DMA_ATTR_NON_CONSISTENT;
 
-       nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-       exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-                       GFP_KERNEL | __GFP_ZERO);
-       if (!exynos_gem->pages) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
-               return -ENOMEM;
-       }
+       /* FBDev emulation requires kernel mapping */
+       if (!kvmap)
+               attr |= DMA_ATTR_NO_KERNEL_MAPPING;
 
+       exynos_gem->dma_attrs = attr;
        exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
-               goto err_free;
-       }
-
-       ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
-                                   exynos_gem->dma_addr, exynos_gem->size,
-                                   exynos_gem->dma_attrs);
-       if (ret < 0) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
-               goto err_dma_free;
-       }
-
-       if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
-                                            nr_pages)) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
-               ret = -EINVAL;
-               goto err_sgt_free;
+               return -ENOMEM;
        }
 
-       sg_free_table(&sgt);
+       if (kvmap)
+               exynos_gem->kvaddr = exynos_gem->cookie;
 
        DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
        return 0;
-
-err_sgt_free:
-       sg_free_table(&sgt);
-err_dma_free:
-       dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
-                      exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
-       kvfree(exynos_gem->pages);
-
-       return ret;
 }
 
 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        exynos_gem->dma_attrs);
-
-       kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
 
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
-                                            unsigned long size)
+                                            unsigned long size,
+                                            bool kvmap)
 {
        struct exynos_drm_gem *exynos_gem;
        int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;
 
-       ret = exynos_drm_alloc_buf(exynos_gem);
+       ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem->base);
                kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
        struct exynos_drm_gem *exynos_gem;
        int ret;
 
-       exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+       exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);
 
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        else
                flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
-       exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+       exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
        if (IS_ERR(exynos_gem)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       struct drm_gem_object *obj = vma->vm_private_data;
-       struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-       unsigned long pfn;
-       pgoff_t page_offset;
-
-       page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
-       if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
-               DRM_ERROR("invalid page offset\n");
-               return VM_FAULT_SIGBUS;
-       }
-
-       pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-       return vmf_insert_mixed(vma, vmf->address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
                                   struct vm_area_struct *vma)
 {
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-       int npages;
+       struct drm_device *drm_dev = obj->dev;
+       struct sg_table *sgt;
+       int ret;
 
-       npages = exynos_gem->size >> PAGE_SHIFT;
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
 
-       return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+       ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+                                   exynos_gem->dma_addr, exynos_gem->size,
+                                   exynos_gem->dma_attrs);
+       if (ret) {
+               DRM_ERROR("failed to get sgtable, %d\n", ret);
+               kfree(sgt);
+               return ERR_PTR(ret);
+       }
+
+       return sgt;
 }
 
 struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct sg_table *sgt)
 {
        struct exynos_drm_gem *exynos_gem;
-       int npages;
-       int ret;
-
-       exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
-       if (IS_ERR(exynos_gem)) {
-               ret = PTR_ERR(exynos_gem);
-               return ERR_PTR(ret);
-       }
 
-       exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+       if (sgt->nents < 1)
+               return ERR_PTR(-EINVAL);
 
-       npages = exynos_gem->size >> PAGE_SHIFT;
-       exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-       if (!exynos_gem->pages) {
-               ret = -ENOMEM;
-               goto err;
+       /*
+        * Check if the provided buffer has been mapped as contiguous
+        * into DMA address space.
+        */
+       if (sgt->nents > 1) {
+               dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+               struct scatterlist *s;
+               unsigned int i;
+
+               for_each_sg(sgt->sgl, s, sgt->nents, i) {
+                       if (!sg_dma_len(s))
+                               break;
+                       if (sg_dma_address(s) != next_addr) {
+                               DRM_ERROR("buffer chunks must be mapped contiguously");
+                               return ERR_PTR(-EINVAL);
+                       }
+                       next_addr = sg_dma_address(s) + sg_dma_len(s);
+               }
        }
 
-       ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
-                                              npages);
-       if (ret < 0)
-               goto err_free_large;
-
-       exynos_gem->sgt = sgt;
+       exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+       if (IS_ERR(exynos_gem))
+               return ERR_CAST(exynos_gem);
 
-       if (sgt->nents == 1) {
-               /* always physically continuous memory if sgt->nents is 1. */
-               exynos_gem->flags |= EXYNOS_BO_CONTIG;
-       } else {
-               /*
-                * this case could be CONTIG or NONCONTIG type but for now
-                * sets NONCONTIG.
-                * TODO. we have to find a way that exporter can notify
-                * the type of its own buffer to importer.
-                */
+       /*
+        * Buffer has been mapped as contiguous into DMA address space,
+        * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+        * We assume a simplified logic below:
+        */
+       if (is_drm_iommu_supported(dev))
                exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
-       }
+       else
+               exynos_gem->flags |= EXYNOS_BO_CONTIG;
 
+       exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+       exynos_gem->sgt = sgt;
        return &exynos_gem->base;
-
-err_free_large:
-       kvfree(exynos_gem->pages);
-err:
-       drm_gem_object_release(&exynos_gem->base);
-       kfree(exynos_gem);
-       return ERR_PTR(ret);
 }
 
 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
index 42ec67bc262d4436ba40aefedcccd92ef1d09457..6ef001f890aa127dad03ea437fbfc4c3889cb77d 100644 (file)
  * @base: a gem object.
  *     - a new handle to this gem object would be created
  *     by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- *     - contain the information to memory region allocated
- *     by user request or at framebuffer creation.
- *     continuous memory region allocated by user request
- *     or at framebuffer creation.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  * @size: size requested from user, in bytes and this size is aligned
  *     in page unit.
  * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
- * @pages: Array of backing pages.
+ * @dma_attrs: attrs passed dma mapping framework
  * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
        void __iomem            *kvaddr;
        dma_addr_t              dma_addr;
        unsigned long           dma_attrs;
-       struct page             **pages;
        struct sg_table         *sgt;
 };
 
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
 /* create a new buffer with gem object */
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
-                                            unsigned long size);
+                                            unsigned long size,
+                                            bool kvmap);
 
 /*
  * request gem object creation and buffer allocation as the size
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args);
 
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
index f41d75923557a23a0e1fcfc103aac0c76abb7277..a86abc173605e5d3840da55b0cafb6cedf903287 100644 (file)
@@ -88,7 +88,7 @@
 
 #define MIC_BS_SIZE_2D(x)      ((x) & 0x3fff)
 
-static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
+static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
 #define NUM_CLKS               ARRAY_SIZE(clk_names)
 static DEFINE_MUTEX(mic_mutex);
 
index dafa87b82052967ca27292c261008125a8e4d58d..2d94afba031e429e351ffe1d2e7d3cf1f99295b2 100644 (file)
@@ -293,10 +293,8 @@ static int rotator_probe(struct platform_device *pdev)
                return PTR_ERR(rot->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "failed to get irq\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
                               rot);
index 93c43c8d914ee78a1b275b700c6486026958ce84..ce1857138f89301cdc1a134744aea46c91622abd 100644 (file)
@@ -502,10 +502,8 @@ static int scaler_probe(struct platform_device *pdev)
                return PTR_ERR(scaler->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "failed to get irq\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
                                        IRQF_ONESHOT, "drm_scaler", scaler);
index b320b3a21ad4f60e4efd373217861829cbdc363c..e5662bdcbbde3dd1babb055fa879ab50d82ef6d0 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 #include <drm/exynos_drm.h>
 
@@ -213,6 +214,12 @@ static ssize_t vidi_store_connection(struct device *dev,
 static DEVICE_ATTR(connection, 0644, vidi_show_connection,
                        vidi_store_connection);
 
+static struct attribute *vidi_attrs[] = {
+       &dev_attr_connection.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(vidi);
+
 int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
                                struct drm_file *file_priv)
 {
@@ -369,10 +376,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs =
        .disable = exynos_vidi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
        struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -406,8 +409,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(ctx->crtc);
        }
 
-       drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
 
@@ -443,7 +445,6 @@ static int vidi_probe(struct platform_device *pdev)
 {
        struct vidi_context *ctx;
        struct device *dev = &pdev->dev;
-       int ret;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -457,23 +458,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
-       ret = device_create_file(dev, &dev_attr_connection);
-       if (ret < 0) {
-               DRM_DEV_ERROR(dev,
-                             "failed to create connection sysfs.\n");
-               return ret;
-       }
-
-       ret = component_add(dev, &vidi_component_ops);
-       if (ret)
-               goto err_remove_file;
-
-       return ret;
-
-err_remove_file:
-       device_remove_file(dev, &dev_attr_connection);
-
-       return ret;
+       return component_add(dev, &vidi_component_ops);
 }
 
 static int vidi_remove(struct platform_device *pdev)
@@ -498,5 +483,6 @@ struct platform_driver vidi_driver = {
        .driver         = {
                .name   = "exynos-drm-vidi",
                .owner  = THIS_MODULE,
+               .dev_groups = vidi_groups,
        },
 };
index 1a7c828fc41d8d723fe444a5b9bd35de3361b513..95dd399aa9ccb3da044acfc4a3a3e6d27198d6ae 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "regs-hdmi.h"
@@ -1559,10 +1560,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs =
        .disable        = hdmi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1843,8 +1840,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 
        hdata->phy_clk.enable = hdmiphy_clk_enable;
 
-       drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
 
index 21b726baedeaa625a975d45a37fe3ef834baa154..c7e2e2ebc327bcf9beef2069c15fb52983655800 100644 (file)
@@ -1244,9 +1244,11 @@ static int mixer_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
+       pm_runtime_enable(dev);
+
        ret = component_add(&pdev->dev, &mixer_component_ops);
-       if (!ret)
-               pm_runtime_enable(dev);
+       if (ret)
+               pm_runtime_disable(dev);
 
        return ret;
 }
index cff344367f81f49758394807627f5f68afd5857a..9b0c4736c21ae565a59a0e8cf98cf3e7d759cee6 100644 (file)
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = fsl_dcu_drm_encoder_destroy,
-};
-
 int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
                               struct drm_crtc *crtc)
 {
@@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
        if (fsl_dev->tcon)
                fsl_tcon_bypass_enable(fsl_dev->tcon);
 
-       ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                return ret;
 
index 29c36d63b20e1d1d81ca1a9b599d75812e55aecc..88535f5aacc5d39aa0bc484a5cad41c5ae325701 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "cdv_device.h"
 #include "intel_bios.h"
 #include "power.h"
@@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs
        .best_encoder = gma_best_encoder,
 };
 
-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
-       .destroy = cdv_intel_crt_enc_destroy,
-};
-
 void cdv_intel_crt_init(struct drm_device *dev,
                        struct psb_intel_mode_device *mode_dev)
 {
@@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
                &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
        encoder = &gma_encoder->base;
-       drm_encoder_init(dev, encoder,
-               &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
index 5772b2dce0d662170731de50b2400ed52feb4c6d..f41cbb753bb469ccc7dc10f4a4736f371f293004 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "gma_display.h"
 #include "psb_drv.h"
@@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ
        return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
 
-
-#if 0
-static char    *voltage_names[] = {
-       "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char    *pre_emph_names[] = {
-       "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char    *link_train_names[] = {
-       "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
 #define CDV_DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
-/*
-static uint8_t
-cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
-{
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
-       }
-}
-*/
+
 static void
 cdv_intel_get_adjust_train(struct gma_encoder *encoder)
 {
@@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
 static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
        .dpms = cdv_intel_dp_dpms,
        .mode_fixup = cdv_intel_dp_mode_fixup,
@@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun
        .best_encoder = gma_best_encoder,
 };
 
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
-       .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
 static void cdv_intel_dp_add_properties(struct drm_connector *connector)
 {
        cdv_intel_attach_force_audio_property(connector);
@@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
        encoder = &gma_encoder->base;
 
        drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
-       drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
@@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
                if (ret == 0) {
                        /* if this fails, presume the device is a ghost */
                        DRM_INFO("failed to retrieve link info, disabling eDP\n");
-                       cdv_intel_dp_encoder_destroy(encoder);
+                       drm_encoder_cleanup(encoder);
                        cdv_intel_dp_destroy(connector);
                        goto err_priv;
                } else {
index 1711a41acc165414a5533048c745f18b2b274ea8..0d12c6ffbc4078796a9e1fcc56e3a583dba54786 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "cdv_device.h"
 #include "psb_drv.h"
@@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev,
                           &cdv_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_DVID);
 
-       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_HDMI;
index ea0a5d9a0acc3111515cc5069d248f4c9509dba9..eaaf4efec21765a494c3bc2ef6750bc4cd080b74 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "cdv_device.h"
 #include "intel_bios.h"
 #include "power.h"
@@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
        return retval;
 }
 
-#if 0
-/*
- * Set LVDS backlight level by I2C command
- */
-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
-                                       unsigned int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
-       u8 out_buf[2];
-       unsigned int blc_i2c_brightness;
-
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = lvds_i2c_bus->slave_addr,
-                       .flags = 0,
-                       .len = 2,
-                       .buf = out_buf,
-               }
-       };
-
-       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
-                            BRIGHTNESS_MASK /
-                            BRIGHTNESS_MAX_LEVEL);
-
-       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
-               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
-
-       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
-       out_buf[1] = (u8)blc_i2c_brightness;
-
-       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
-               return 0;
-
-       DRM_ERROR("I2C transfer error\n");
-       return -1;
-}
-
-
-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-
-       u32 max_pwm_blc;
-       u32 blc_pwm_duty_cycle;
-
-       max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
-
-       /*BLC_PWM_CTL Should be initiated while backlight device init*/
-       BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
-
-       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-
-       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
-               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-
-       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
-       REG_WRITE(BLC_PWM_CTL,
-                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
-                 (blc_pwm_duty_cycle));
-
-       return 0;
-}
-
-/*
- * Set LVDS backlight level either by I2C or PWM
- */
-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-
-       if (!dev_priv->lvds_bl) {
-               DRM_ERROR("NO LVDS Backlight Info\n");
-               return;
-       }
-
-       if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
-               cdv_lvds_i2c_set_brightness(dev, level);
-       else
-               cdv_lvds_pwm_set_brightness(dev, level);
-}
-#endif
-
 /**
  * Sets the backlight level.
  *
@@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
        .destroy = cdv_intel_lvds_destroy,
 };
 
-
-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
-       .destroy = cdv_intel_lvds_enc_destroy,
-};
-
 /*
  * Enumerate the child dev array parsed from VBT to check whether
  * the LVDS is present.
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
                           &cdv_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder,
-                        &cdv_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
-
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 1d8f67e4795a685f2d720074c0c126e6a8492833..23a78d7553827862d512368cc372c2b88a24b238 100644 (file)
@@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev)
                        break;
                case INTEL_OUTPUT_SDVO:
                        crtc_mask = dev_priv->ops->sdvo_mask;
-                       clone_mask = (1 << INTEL_OUTPUT_SDVO);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_LVDS:
-                       crtc_mask = dev_priv->ops->lvds_mask;
-                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       crtc_mask = dev_priv->ops->lvds_mask;
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_MIPI:
                        crtc_mask = (1 << 0);
-                       clone_mask = (1 << INTEL_OUTPUT_MIPI);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_MIPI2:
                        crtc_mask = (1 << 2);
-                       clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_HDMI:
-                       crtc_mask = dev_priv->ops->hdmi_mask;
+                       crtc_mask = dev_priv->ops->hdmi_mask;
                        clone_mask = (1 << INTEL_OUTPUT_HDMI);
                        break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        crtc_mask = (1 << 0) | (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_EDP:
                        crtc_mask = (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_EDP);
+                       clone_mask = 0;
                }
                encoder->possible_crtcs = crtc_mask;
                encoder->possible_clones =
index d4c65f26892219209fa9eb4beb5cd3bfecda342a..c976a9dd9240d907b38df1018818cac39f7d62cc 100644 (file)
@@ -27,6 +27,8 @@
 
 #include <linux/delay.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "mdfld_dsi_dpi.h"
 #include "mdfld_dsi_pkg_sender.h"
 #include "mdfld_output.h"
@@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
        /*create drm encoder object*/
        connector = &dsi_connector->base.base;
        encoder = &dpi_output->base.base.base;
-       drm_encoder_init(dev,
-                       encoder,
-                       p_funcs->encoder_funcs,
-                       DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
        drm_encoder_helper_add(encoder,
                                p_funcs->encoder_helper_funcs);
 
@@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
        /*set possible crtcs and clones*/
        if (dsi_connector->pipe) {
                encoder->possible_crtcs = (1 << 2);
-               encoder->possible_clones = (1 << 1);
+               encoder->possible_clones = 0;
        } else {
                encoder->possible_crtcs = (1 << 0);
-               encoder->possible_clones = (1 << 0);
+               encoder->possible_clones = 0;
        }
 
        dsi_connector->base.encoder = &dpi_output->base.base;
index 4fff110c492124e8629cf0c979c61e0dd1c30b72..aae2d358364cc522546e170641b175fe89318f72 100644 (file)
@@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 
        dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
 
-#if 0
-       if (pipe == 1) {
-               if (!gma_power_begin(dev, true))
-                       return 0;
-               android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
-                       x, y, old_fb);
-               goto mrst_crtc_mode_set_exit;
-       }
-#endif
-
        ret = check_fb(crtc->primary->fb);
        if (ret)
                return ret;
@@ -918,14 +908,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
                }
                dpll = 0;
 
-#if 0 /* FIXME revisit later */
-               if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
-                                               ksel == KSEL_BYPASS_25)
-                       dpll &= ~MDFLD_INPUT_REF_SEL;
-               else if (ksel == KSEL_BYPASS_83_100)
-                       dpll |= MDFLD_INPUT_REF_SEL;
-#endif /* FIXME revisit later */
-
                if (is_hdmi)
                        dpll |= MDFLD_VCO_SEL;
 
@@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
                /* compute bitmask from p1 value */
                dpll |= (1 << (clock.p1 - 2)) << 17;
 
-#if 0 /* 1080p30 & 720p */
-               dpll = 0x00050000;
-               fp = 0x000001be;
-#endif
-#if 0 /* 480p */
-               dpll = 0x02010000;
-               fp = 0x000000d2;
-#endif
        } else {
-#if 0 /*DBI_TPO_480x864*/
-               dpll = 0x00020000;
-               fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
                dpll = 0x00800000;
                fp = 0x000000c1;
        }
index ab2b27c0f037bdfa431fa79a55aa7b3c92d4d713..17a944d70add3a4532eee94980bd49e80ed0f567 100644 (file)
@@ -51,7 +51,6 @@ struct panel_info {
 };
 
 struct panel_funcs {
-       const struct drm_encoder_funcs *encoder_funcs;
        const struct drm_encoder_helper_funcs *encoder_helper_funcs;
        struct drm_display_mode * (*get_config_mode)(struct drm_device *);
        int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
index 49c92debb7b25137bf4dab95818de89c8c932bad..25e897b98f8621587b3d482aac668e490047ace4 100644 (file)
@@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs
        .commit = mdfld_dsi_dpi_commit,
 };
 
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tmd_vid_funcs = {
-       .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
        .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
        .get_config_mode = &tmd_vid_get_config_mode,
        .get_panel_info = tmd_vid_get_panel_info,
index a9420bf9a4198ef0f7dcd59f5d8564d4b45b28af..11845978fb0a68efa7679332aa7f534b5e5f845a 100644 (file)
@@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs
        .commit = mdfld_dsi_dpi_commit,
 };
 
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tpo_vid_funcs = {
-       .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
        .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
        .get_config_mode = &tpo_vid_get_config_mode,
        .get_panel_info = tpo_vid_get_panel_info,
index f4370232767d3d70ce7e65f8ae7af2d622f60419..a097a59a9eaec1929487f7661f7d4843b2f06d21 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 
 #include <drm/drm.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
@@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
        .destroy = oaktrail_hdmi_destroy,
 };
 
-static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
-       .destroy = oaktrail_hdmi_enc_destroy,
-};
-
 void oaktrail_hdmi_init(struct drm_device *dev,
                                        struct psb_intel_mode_device *mode_dev)
 {
@@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
                           &oaktrail_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_DVID);
 
-       drm_encoder_init(dev, encoder,
-                        &oaktrail_hdmi_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
@@ -673,11 +663,6 @@ failed_connector:
        kfree(gma_encoder);
 }
 
-static const struct pci_device_id hdmi_ids[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
-       { 0 }
-};
-
 void oaktrail_hdmi_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
index 582e095975002de841d032a5f53f364ceebf6bc0..2828360153d16e6e19eaf53841cd42e905c83c13 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <asm/intel-mid.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "intel_bios.h"
 #include "power.h"
 #include "psb_drv.h"
@@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
                           &psb_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 16c6136f778b07c32bc7642e4ad2bb79a0b28e74..fb601983cef0f8e79245b48d3545f2790a9d2790 100644 (file)
@@ -252,7 +252,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
                                        struct drm_property *property,
                                        uint64_t value);
 extern void psb_intel_lvds_destroy(struct drm_connector *connector);
-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
 
 /* intel_gmbus.c */
 extern void gma_intel_i2c_reset(struct drm_device *dev);
index afaebab7bc17f79d6f222b993ef2278b15892c23..063c66bb946d0c21872e0ad72db962d60ed565d5 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "intel_bios.h"
 #include "power.h"
 #include "psb_drv.h"
@@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
        .destroy = psb_intel_lvds_destroy,
 };
 
-
-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
-       .destroy = psb_intel_lvds_enc_destroy,
-};
-
-
-
 /**
  * psb_intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
@@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
                           &psb_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder,
-                        &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 264d7ad004b46ffd767d11073d736420fffa25ee..68fb3d7c172b276f63ffcbfaa2e77451ad525755 100644 (file)
@@ -864,36 +864,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd
        DRM_INFO("HDMI is not supported yet");
 
        return false;
-#if 0
-       struct dip_infoframe avi_if = {
-               .type = DIP_TYPE_AVI,
-               .ver = DIP_VERSION_AVI,
-               .len = DIP_LEN_AVI,
-       };
-       uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
-       uint8_t set_buf_index[2] = { 1, 0 };
-       uint64_t *data = (uint64_t *)&avi_if;
-       unsigned i;
-
-       intel_dip_infoframe_csum(&avi_if);
-
-       if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                 SDVO_CMD_SET_HBUF_INDEX,
-                                 set_buf_index, 2))
-               return false;
-
-       for (i = 0; i < sizeof(avi_if); i += 8) {
-               if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                         SDVO_CMD_SET_HBUF_DATA,
-                                         data, 8))
-                       return false;
-               data++;
-       }
-
-       return psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                   SDVO_CMD_SET_HBUF_TXRATE,
-                                   &tx_rate, 1);
-#endif
 }
 
 static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
@@ -1227,75 +1197,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv
        return true;
 }
 
-/* No use! */
-#if 0
-struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
-       struct drm_connector *connector = NULL;
-       struct psb_intel_sdvo *iout = NULL;
-       struct psb_intel_sdvo *sdvo;
-
-       /* find the sdvo connector */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               iout = to_psb_intel_sdvo(connector);
-
-               if (iout->type != INTEL_OUTPUT_SDVO)
-                       continue;
-
-               sdvo = iout->dev_priv;
-
-               if (sdvo->sdvo_reg == SDVOB && sdvoB)
-                       return connector;
-
-               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
-                       return connector;
-
-       }
-
-       return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
-       u8 response[2];
-       u8 status;
-       struct psb_intel_sdvo *psb_intel_sdvo;
-       DRM_DEBUG_KMS("\n");
-
-       if (!connector)
-               return 0;
-
-       psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
-       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
-                                   &response, 2) && response[0];
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
-       u8 response[2];
-       u8 status;
-       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
-       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
-       if (on) {
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-               status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       } else {
-               response[0] = 0;
-               response[1] = 0;
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       }
-
-       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-}
-#endif
-
 static bool
 psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
 {
index 9e8224456ea29ac9ff62ba1747c28e118e2a57b9..e5bdd99ad453f4611e98411f03e9f226499044d8 100644 (file)
@@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void)
                return -EINVAL;
        }
 
-       client = i2c_new_device(adapter, &info);
-       if (!client) {
-               pr_err("%s: i2c_new_device() failed\n", __func__);
+       client = i2c_new_client_device(adapter, &info);
+       if (IS_ERR(client)) {
+               pr_err("%s: creating I2C device failed\n", __func__);
                i2c_put_adapter(adapter);
-               return -EINVAL;
+               return PTR_ERR(client);
        }
 
        return 0;
@@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
        .commit = mdfld_dsi_dpi_commit,
 };
 
-static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tc35876x_funcs = {
-       .encoder_funcs = &tc35876x_encoder_funcs,
        .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
        .get_config_mode = tc35876x_get_config_mode,
        .get_panel_info = tc35876x_get_panel_info,
index 55b46a7150a5e1b08983ef73f6beee24478ec282..cc70e836522f05532e12707194012f505c774933 100644 (file)
@@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (state->fb->pitches[0] % 128 != 0) {
+               DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n");
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
        writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
 
        reg = state->fb->width * (state->fb->format->cpp[0]);
-       /* now line_pad is 16 */
-       reg = PADDING(16, reg);
 
-       line_l = state->fb->width * state->fb->format->cpp[0];
-       line_l = PADDING(16, line_l);
+       line_l = state->fb->pitches[0];
        writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
               HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
               priv->mmio + HIBMC_CRT_FB_WIDTH);
index 222356a4f9a84d019d6fa6df5780ac98911fa105..a6fd0c29e5b89cc5c657cb2c81c8b8a165f10ca9 100644 (file)
@@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
        priv->dev->mode_config.max_height = 1200;
 
        priv->dev->mode_config.fb_base = priv->fb_base;
-       priv->dev->mode_config.preferred_depth = 24;
+       priv->dev->mode_config.preferred_depth = 32;
        priv->dev->mode_config.prefer_shadow = 1;
 
        priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev)
        /* reset all the states of crtc/plane/encoder/connector */
        drm_mode_config_reset(dev);
 
-       ret = drm_fbdev_generic_setup(dev, 16);
-       if (ret) {
-               DRM_ERROR("failed to initialize fbdev: %d\n", ret);
-               goto err;
-       }
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
 
        return 0;
 
index 99397ac3b363723d3aa3e4cc579ca438fd9d52c7..322bd542e89d933174966dde854523235b268a53 100644 (file)
@@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
                      struct drm_mode_create_dumb *args)
 {
-       return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+       return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
 }
 
 const struct drm_mode_config_funcs hibmc_mode_funcs = {
index f31068d74b18f3c56763466b013ba583aba7f59f..00e87c2907963ef65e0dda40ffa2d2634d26da38 100644 (file)
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_device.h>
-#include <drm/drm_encoder_slave.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dw_dsi_reg.h"
 
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
        .disable        = dsi_encoder_disable
 };
 
-static const struct drm_encoder_funcs dw_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int dw_drm_encoder_init(struct device *dev,
                               struct drm_device *drm_dev,
                               struct drm_encoder *encoder)
@@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev,
        }
 
        encoder->possible_crtcs = crtc_mask;
-       ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("failed to init dsi encoder\n");
                return ret;
index 86000127d4eec54465ee2e4db8403c6f52cac117..c339e632522a91535c567287c9b1072de6870e4c 100644 (file)
@@ -940,7 +940,6 @@ static struct drm_driver ade_driver = {
 };
 
 struct kirin_drm_data ade_driver_data = {
-       .register_connects = false,
        .num_planes = ADE_CH_NUM,
        .prim_plane = ADE_CH1,
        .channel_formats = channel_formats,
index d3145ae877d74d766925d703326471d12134b17e..4349da3e2379c5b21d5403bd70dffc0aee7f832c 100644 (file)
@@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
        return 0;
 }
 
-static int kirin_drm_connectors_register(struct drm_device *dev)
-{
-       struct drm_connector *connector;
-       struct drm_connector *failed_connector;
-       struct drm_connector_list_iter conn_iter;
-       int ret;
-
-       mutex_lock(&dev->mode_config.mutex);
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       failed_connector = connector;
-                       goto err;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       return 0;
-
-err:
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               if (failed_connector == connector)
-                       break;
-               drm_connector_unregister(connector);
-       }
-       drm_connector_list_iter_end(&conn_iter);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       return ret;
-}
-
 static int kirin_drm_bind(struct device *dev)
 {
        struct kirin_drm_data *driver_data;
@@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev)
 
        drm_fbdev_generic_setup(drm_dev, 32);
 
-       /* connectors should be registered after drm device register */
-       if (driver_data->register_connects) {
-               ret = kirin_drm_connectors_register(drm_dev);
-               if (ret)
-                       goto err_drm_dev_unregister;
-       }
-
        return 0;
 
-err_drm_dev_unregister:
-       drm_dev_unregister(drm_dev);
 err_kms_cleanup:
        kirin_drm_kms_cleanup(drm_dev);
 err_drm_dev_put:
index 4d5c05a240652d865448f3d3e5943a9e2993572a..dee8ec2f7f2ec19247b16b13035e49d52c2cf8fd 100644 (file)
@@ -37,7 +37,6 @@ struct kirin_drm_data {
        u32 channel_formats_cnt;
        int config_max_width;
        int config_max_height;
-       bool register_connects;
        u32 num_planes;
        u32 prim_plane;
 
index a839f78a4c8a3037d8335330bfb86f033bc740a0..741886b5441973b19816a229a4a317dd0c14bf2a 100644 (file)
@@ -393,7 +393,7 @@ sil164_detect_slave(struct i2c_client *client)
                return NULL;
        }
 
-       return i2c_new_device(adap, &info);
+       return i2c_new_client_device(adap, &info);
 }
 
 static int
@@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client,
                    struct drm_encoder_slave *encoder)
 {
        struct sil164_priv *priv;
+       struct i2c_client *slave_client;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client,
        encoder->slave_priv = priv;
        encoder->slave_funcs = &sil164_encoder_funcs;
 
-       priv->duallink_slave = sil164_detect_slave(client);
+       slave_client = sil164_detect_slave(client);
+       if (!IS_ERR(slave_client))
+               priv->duallink_slave = slave_client;
 
        return 0;
 }
index c3332209f27a8dc982eafaaedbbd3dce9ac80935..9517f522dcb9c20b4dadd6b76451b1e9b7d3829d 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/i2c/tda998x.h>
 
 #include <media/cec-notifier.h>
@@ -1132,7 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
        mutex_unlock(&priv->audio_mutex);
 }
 
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+                                     bool enable)
 {
        struct tda998x_priv *priv = dev_get_drvdata(dev);
 
@@ -1949,9 +1951,9 @@ static int tda998x_create(struct device *dev)
        cec_info.platform_data = &priv->cec_glue;
        cec_info.irq = client->irq;
 
-       priv->cec = i2c_new_device(client->adapter, &cec_info);
-       if (!priv->cec) {
-               ret = -ENODEV;
+       priv->cec = i2c_new_client_device(client->adapter, &cec_info);
+       if (IS_ERR(priv->cec)) {
+               ret = PTR_ERR(priv->cec);
                goto fail;
        }
 
@@ -1997,15 +1999,6 @@ err_irq:
 
 /* DRM encoder functions */
 
-static void tda998x_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs tda998x_encoder_funcs = {
-       .destroy = tda998x_encoder_destroy,
-};
-
 static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 {
        struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -2023,8 +2016,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 
        priv->encoder.possible_crtcs = crtcs;
 
-       ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm, &priv->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret)
                goto err_encoder;
 
index 9c8af50011e7e0f3c3e372f48f8b0bb532568142..9ea1a397d1b54e813822a42eb5cf05a85b7fe876 100644 (file)
@@ -8886,7 +8886,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 
        mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
 
-       mode->hsync = drm_mode_hsync(mode);
        mode->vrefresh = drm_mode_vrefresh(mode);
        drm_mode_set_name(mode);
 }
index 3d9dc27478b334d024b2cd5b569d1cd4dfaf811a..70525623bcdf094611c977d32851e11dcc507f5e 100644 (file)
@@ -632,15 +632,9 @@ static void intel_dp_info(struct seq_file *m,
 }
 
 static void intel_dp_mst_info(struct seq_file *m,
-                         struct intel_connector *intel_connector)
+                             struct intel_connector *intel_connector)
 {
-       struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
-       struct intel_dp_mst_encoder *intel_mst =
-               enc_to_mst(intel_encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                       intel_connector->port);
+       bool has_audio = intel_connector->port->has_audio;
 
        seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
 }
@@ -1984,7 +1978,7 @@ static const struct {
        {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
 };
 
-int intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct drm_i915_private *i915)
 {
        struct drm_minor *minor = i915->drm.primary;
        int i;
@@ -1997,9 +1991,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915)
                                    intel_display_debugfs_files[i].fops);
        }
 
-       return drm_debugfs_create_files(intel_display_debugfs_list,
-                                       ARRAY_SIZE(intel_display_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(intel_display_debugfs_list,
+                                ARRAY_SIZE(intel_display_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 
 static int i915_panel_show(struct seq_file *m, void *data)
index a3bea1ce04c2316f352c5efc32e7882eaa7b3d08..c922c1745bfe170d91af1f857d74db23306e9401 100644 (file)
@@ -10,10 +10,10 @@ struct drm_connector;
 struct drm_i915_private;
 
 #ifdef CONFIG_DEBUG_FS
-int intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct drm_i915_private *i915);
 int intel_connector_debugfs_add(struct drm_connector *connector);
 #else
-static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
 static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
 #endif
 
index 6a27e72ccf01814bca418ef5ce26f9012886662d..2bf3d4cb4ea983c872a9f19565f1bb953b9ab30c 100644 (file)
@@ -438,7 +438,7 @@ struct intel_connector {
           state of connector->polled in case hotplug storm detection changes it */
        u8 polled;
 
-       void *port; /* store this opaque as its illegal to dereference it */
+       struct drm_dp_mst_port *port;
 
        struct intel_dp *mst_port;
 
index 8c732418a33fc08f9e21b4f48961a9aa51414f13..d18b406f2a7d2384bf3ed11e909f4d0bf8315c02 100644 (file)
@@ -113,9 +113,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
        pipe_config->has_pch_encoder = false;
 
        if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-               pipe_config->has_audio =
-                       drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                                 connector->port);
+               pipe_config->has_audio = connector->port->has_audio;
        else
                pipe_config->has_audio =
                        intel_conn_state->force_audio == HDMI_AUDIO_ON;
index 698e22420dc5e946c17e956ab8fb57f3b6b8722d..7fe9831aa9bab9b7591b6c6855553ee67848fcda 100644 (file)
@@ -10,8 +10,6 @@
 
 #include <drm/drm.h> /* for drm_legacy.h! */
 #include <drm/drm_cache.h>
-#include <drm/drm_legacy.h> /* for drm_pci.h! */
-#include <drm/drm_pci.h>
 
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
index 074c4efb58eb96e7d25045ddfd1b6a76c1995973..eee530453aa6772d94ea095dfe46bce3db8c7256 100644 (file)
@@ -131,6 +131,7 @@ struct kvmgt_vdev {
        struct work_struct release_work;
        atomic_t released;
        struct vfio_device *vfio_device;
+       struct vfio_group *vfio_group;
 };
 
 static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size)
 {
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+       struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
        int total_pages;
        int npage;
        int ret;
@@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        for (npage = 0; npage < total_pages; npage++) {
                unsigned long cur_gfn = gfn + npage;
 
-               ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+               ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
                drm_WARN_ON(&i915->drm, ret != 1);
        }
 }
@@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size, struct page **page)
 {
+       struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
        unsigned long base_pfn = 0;
        int total_pages;
        int npage;
@@ -183,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long cur_gfn = gfn + npage;
                unsigned long pfn;
 
-               ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
-                                    IOMMU_READ | IOMMU_WRITE, &pfn);
+               ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+                                          IOMMU_READ | IOMMU_WRITE, &pfn);
                if (ret != 1) {
                        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
                                     cur_gfn, ret);
@@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
        unsigned long events;
        int ret;
+       struct vfio_group *vfio_group;
 
        vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
        vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
                goto undo_iommu;
        }
 
+       vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+       if (IS_ERR_OR_NULL(vfio_group)) {
+               ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+               gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+               goto undo_register;
+       }
+       vdev->vfio_group = vfio_group;
+
        /* Take a module reference as mdev core doesn't take
         * a reference for vendor driver.
         */
@@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        return ret;
 
 undo_group:
+       vfio_group_put_external_user(vdev->vfio_group);
+       vdev->vfio_group = NULL;
+
+undo_register:
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                                        &vdev->group_notifier);
 
@@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
        kvmgt_guest_exit(info);
 
        intel_vgpu_release_msi_eventfd_ctx(vgpu);
+       vfio_group_put_external_user(vdev->vfio_group);
 
        vdev->kvm = NULL;
        vgpu->handle = 0;
@@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len, bool write)
 {
        struct kvmgt_guest_info *info;
-       struct kvm *kvm;
-       int idx, ret;
-       bool kthread = current->mm == NULL;
 
        if (!handle_valid(handle))
                return -ESRCH;
 
        info = (struct kvmgt_guest_info *)handle;
-       kvm = info->kvm;
-
-       if (kthread) {
-               if (!mmget_not_zero(kvm->mm))
-                       return -EFAULT;
-               use_mm(kvm->mm);
-       }
-
-       idx = srcu_read_lock(&kvm->srcu);
-       ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
-                     kvm_read_guest(kvm, gpa, buf, len);
-       srcu_read_unlock(&kvm->srcu, idx);
-
-       if (kthread) {
-               unuse_mm(kvm->mm);
-               mmput(kvm->mm);
-       }
 
-       return ret;
+       return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+                          gpa, buf, len, write);
 }
 
 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
index 4481feb34bc57066f9ca70c7bb666e73c20b4089..bca036ac662129414d38168f17fd85db68b684e8 100644 (file)
@@ -1898,7 +1898,7 @@ static const struct i915_debugfs_files {
 #endif
 };
 
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
        struct drm_minor *minor = dev_priv->drm.primary;
        int i;
@@ -1915,7 +1915,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
                                    i915_debugfs_files[i].fops);
        }
 
-       return drm_debugfs_create_files(i915_debugfs_list,
-                                       I915_DEBUGFS_ENTRIES,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(i915_debugfs_list,
+                                I915_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
 }
index 6da39c76ab5e7e687e83bbb4c3fbd453a2e12bbb..1de2736f124872e25cf269afa1913f8668f251ac 100644 (file)
@@ -12,10 +12,10 @@ struct drm_i915_private;
 struct seq_file;
 
 #ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
 #else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {}
 static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
 #endif
 
index c08b165a9cb42488ba4aec98d4bff7b06b1656bf..34ee12f3f02d465d4ad7703548080fb97ba8b3a2 100644 (file)
@@ -43,6 +43,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "display/intel_acpi.h"
@@ -905,17 +906,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
        struct drm_i915_private *i915;
-       int err;
 
-       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
-       if (!i915)
-               return ERR_PTR(-ENOMEM);
-
-       err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
-       if (err) {
-               kfree(i915);
-               return ERR_PTR(err);
-       }
+       i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+                                 struct drm_i915_private, drm);
+       if (IS_ERR(i915))
+               return i915;
 
        i915->drm.pdev = pdev;
        pci_set_drvdata(pdev, i915);
@@ -930,17 +925,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
        return i915;
 }
 
-static void i915_driver_destroy(struct drm_i915_private *i915)
-{
-       struct pci_dev *pdev = i915->drm.pdev;
-
-       drm_dev_fini(&i915->drm);
-       kfree(i915);
-
-       /* And make sure we never chase our dangling pointer from pci_dev */
-       pci_set_drvdata(pdev, NULL);
-}
-
 /**
  * i915_driver_probe - setup chip and create an initial config
  * @pdev: PCI device
@@ -1022,6 +1006,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        i915_welcome_messages(i915);
 
+       i915->do_release = true;
+
        return 0;
 
 out_cleanup_irq:
@@ -1041,7 +1027,6 @@ out_pci_disable:
        pci_disable_device(pdev);
 out_fini:
        i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
-       i915_driver_destroy(i915);
        return ret;
 }
 
@@ -1081,6 +1066,9 @@ static void i915_driver_release(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 
+       if (!dev_priv->do_release)
+               return;
+
        disable_rpm_wakeref_asserts(rpm);
 
        i915_gem_driver_release(dev_priv);
@@ -1094,7 +1082,6 @@ static void i915_driver_release(struct drm_device *dev)
        intel_runtime_pm_driver_release(rpm);
 
        i915_driver_late_release(dev_priv);
-       i915_driver_destroy(dev_priv);
 }
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
index e993c64a02b09ca1ddd518ca71824baaf0f3a94a..adb9bf34cf97a3bb21690c8ec73bbeb382a0f7b3 100644 (file)
@@ -826,6 +826,9 @@ struct i915_selftest_stash {
 struct drm_i915_private {
        struct drm_device drm;
 
+       /* FIXME: Device release actions should all be moved to drmm_ */
+       bool do_release;
+
        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
index 193048ce3c3ac6339a9927a3ba437d2ddbee2abf..eb0b5be7c35d33305fbb490aaf0aa2c1e83b0fce 100644 (file)
@@ -955,8 +955,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
 
        i915_driver_remove(i915);
        pci_set_drvdata(pdev, NULL);
-
-       drm_dev_put(&i915->drm);
 }
 
 /* is device_id present in comma separated list of ids */
index 47fde54150f4cefe68ddd75d9d41342e9d7d4200..9b105b811f1f4514a9c5142a0eea05c2919bd78f 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_managed.h>
+
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/mock_engine.h"
@@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev)
 {
        struct drm_i915_private *i915 = to_i915(dev);
 
+       if (!i915->do_release)
+               goto out;
+
        mock_device_flush(i915);
        intel_gt_driver_remove(&i915->gt);
 
@@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev)
 
        drm_mode_config_cleanup(&i915->drm);
 
-       drm_dev_fini(&i915->drm);
+out:
        put_device(&i915->drm.pdev->dev);
+       i915->drm.pdev = NULL;
 }
 
 static struct drm_driver mock_driver = {
@@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void)
        struct pci_dev *pdev;
        int err;
 
-       pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+       pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
-               goto err;
+               return NULL;
+       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+       if (!i915) {
+               kfree(pdev);
+               return NULL;
+       }
 
        device_initialize(&pdev->dev);
        pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
@@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void)
        pdev->dev.archdata.iommu = (void *)-1;
 #endif
 
-       i915 = (struct drm_i915_private *)(pdev + 1);
        pci_set_drvdata(pdev, i915);
 
        dev_pm_domain_set(&pdev->dev, &pm_domain);
@@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void)
        err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
        if (err) {
                pr_err("Failed to initialise mock GEM device: err=%d\n", err);
-               goto put_device;
+               put_device(&pdev->dev);
+               kfree(i915);
+
+               return NULL;
        }
        i915->drm.pdev = pdev;
+       drmm_add_final_kfree(&i915->drm, i915);
 
        intel_runtime_pm_init_early(&i915->runtime_pm);
 
@@ -188,6 +202,8 @@ struct drm_i915_private *mock_gem_device(void)
        __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
        intel_engines_driver_register(i915);
 
+       i915->do_release = true;
+
        return i915;
 
 err_context:
@@ -198,9 +214,7 @@ err_drv:
        intel_gt_driver_late_release(&i915->gt);
        intel_memory_regions_driver_release(i915);
        drm_mode_config_cleanup(&i915->drm);
-       drm_dev_fini(&i915->drm);
-put_device:
-       put_device(&pdev->dev);
-err:
+       drm_dev_put(&i915->drm);
+
        return NULL;
 }
index f22cfbf9353ede21121809335751130a9777217a..ba4ca17fd4d8507186261c2abffeb504fe165cc3 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs =
        .atomic_check = dw_hdmi_imx_atomic_check,
 };
 
-static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_mode_status
 imx6q_hdmi_mode_valid(struct drm_connector *con,
                      const struct drm_display_mode *mode)
@@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
                return ret;
 
        drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        platform_set_drvdata(pdev, hdmi);
 
index da87c70e413b4d6d821b991621563c3edf618fc0..2e38f1a5cf8da6a195a9a6ed85f9483960a69953 100644 (file)
@@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector)
 }
 EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
 
-void imx_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-
 static int imx_drm_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
 {
@@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
 
        encoder->possible_crtcs = crtc_mask;
 
-       /* FIXME: this is the mask of outputs which can clone this output. */
-       encoder->possible_clones = ~0;
+       /* FIXME: cloning support not clear, disable it all for now */
+       encoder->possible_clones = 0;
 
        return 0;
 }
index ab9c6f706eb3d8b87db9ac4bac336bf7cd3c9cd9..c3e1a3f14d30cd00de6bdb9352a9d29f90c853ac 100644 (file)
@@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
        struct drm_encoder *encoder, struct device_node *np);
 
 void imx_drm_connector_destroy(struct drm_connector *connector);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder);
 
 int ipu_planes_assign_pre(struct drm_device *dev,
                          struct drm_atomic_state *state);
index 4da22a94790cfca5059de6ab720067154e3cd9ba..66ea68e8da8757d23c33f345280e6c6c60f6beee 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs =
        .best_encoder = imx_ldb_connector_best_encoder,
 };
 
-static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
        .atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
        .enable = imx_ldb_encoder_enable,
@@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm,
        }
 
        drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
 
        if (imx_ldb_ch->bridge) {
                ret = drm_bridge_attach(&imx_ldb_ch->encoder,
index 5bbfaa2cd0f47af15f0594f5001857ed5440dcbb..ee63782c77e9cc12db78c643e5f32dd7d7954470 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs =
        .mode_valid = imx_tve_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
        .mode_set = imx_tve_encoder_mode_set,
        .enable = imx_tve_encoder_enable,
@@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
                return ret;
 
        drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
-       drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
-                        encoder_type, NULL);
+       drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
 
        drm_connector_helper_add(&tve->connector,
                        &imx_tve_connector_helper_funcs);
index 08fafa4bf8c21302d8d0e464081f6b29281f0291..ac916c84a63185ef584569ab5bba2c599d96c95d 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
        .best_encoder = imx_pd_connector_best_encoder,
 };
 
-static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
        .enable = imx_pd_bridge_enable,
        .disable = imx_pd_bridge_disable,
@@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm,
         */
        imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
-                        DRM_MODE_ENCODER_NONE, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
 
        imxpd->bridge.funcs = &imx_pd_bridge_funcs;
        drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
index 9dfe7cb530e1184c5d158c12622bcd5242dd1fa7..632d72177123c524bbff14840991d96f6475b21a 100644 (file)
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_plane.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 
 #define JZ_REG_LCD_CFG                         0x00
@@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static void ingenic_drm_release(struct drm_device *drm)
-{
-       struct ingenic_drm *priv = drm_device_get_priv(drm);
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(priv);
-}
-
 static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
 {
        struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -540,7 +533,6 @@ static struct drm_driver ingenic_drm_driver_data = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
 
        .irq_handler            = ingenic_drm_irq_handler,
-       .release                = ingenic_drm_release,
 };
 
 static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -592,10 +584,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
        .atomic_commit          = drm_atomic_helper_commit,
 };
 
-static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = {
-       .destroy                = drm_encoder_cleanup,
-};
-
 static void ingenic_drm_free_dma_hwdesc(void *d)
 {
        struct ingenic_drm *priv = d;
@@ -623,24 +611,21 @@ static int ingenic_drm_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
+                                 struct ingenic_drm, drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        priv->soc_info = soc_info;
        priv->dev = dev;
        drm = &priv->drm;
-       drm->dev_private = priv;
 
        platform_set_drvdata(pdev, priv);
 
-       ret = devm_drm_dev_init(dev, drm, &ingenic_drm_driver_data);
-       if (ret) {
-               kfree(priv);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
                return ret;
-       }
 
-       drm_mode_config_init(drm);
        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
        drm->mode_config.max_width = soc_info->max_width;
@@ -661,10 +646,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "Failed to get platform irq");
+       if (irq < 0)
                return irq;
-       }
 
        if (soc_info->needs_dev_clk) {
                priv->lcd_clk = devm_clk_get(dev, "lcd");
@@ -730,8 +713,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
        drm_encoder_helper_add(&priv->encoder,
                               &ingenic_drm_encoder_helper_funcs);
 
-       ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs,
-                              DRM_MODE_ENCODER_DPI, NULL);
+       ret = drm_simple_encoder_init(drm, &priv->encoder,
+                                     DRM_MODE_ENCODER_DPI);
        if (ret) {
                dev_err(dev, "Failed to init encoder: %i", ret);
                return ret;
@@ -791,9 +774,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
                goto err_devclk_disable;
        }
 
-       ret = drm_fbdev_generic_setup(drm, 32);
-       if (ret)
-               dev_warn(dev, "Unable to start fbdev emulation: %i", ret);
+       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
index d589f09d04d97f1ee80417764b47b2befa9095f4..fa1d4f5df31e86f4021d61950cef445c845a1c17 100644 (file)
@@ -10,5 +10,7 @@ config DRM_LIMA
        depends on OF
        select DRM_SCHED
        select DRM_GEM_SHMEM_HELPER
+       select PM_DEVFREQ
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
        help
         DRM driver for ARM Mali 400/450 GPUs.
index a85444b0a1d458594c48ada319c914a7b8d01d79..ca2097b8e1ad0518022ea772e8ebce2ec691e7f8 100644 (file)
@@ -14,6 +14,8 @@ lima-y := \
        lima_sched.o \
        lima_ctx.o \
        lima_dlbu.o \
-       lima_bcast.o
+       lima_bcast.o \
+       lima_trace.o \
+       lima_devfreq.o
 
 obj-$(CONFIG_DRM_LIMA) += lima.o
index 288398027bfa81f4874290d35b95cf9d794eac81..fbc43f243c54d23721837c0f50477c6bdd6a606a 100644 (file)
@@ -26,18 +26,33 @@ void lima_bcast_enable(struct lima_device *dev, int num_pp)
        bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
 }
 
+static int lima_bcast_hw_init(struct lima_ip *ip)
+{
+       bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16); /* mask computed once in lima_bcast_init() from present PP cores */
+       bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask);
+       return 0;
+}
+
+int lima_bcast_resume(struct lima_ip *ip)
+{
+       return lima_bcast_hw_init(ip); /* reprogram the broadcast/interrupt masks on resume */
+}
+
+void lima_bcast_suspend(struct lima_ip *ip)
+{
+       /* nothing to save: the mask is cached in ip->data.mask, not read back from hardware */
+}
+
 int lima_bcast_init(struct lima_ip *ip)
 {
-       int i, mask = 0;
+       int i;
 
        for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
                if (ip->dev->ip[i].present)
-                       mask |= 1 << (i - lima_ip_pp0);
+                       ip->data.mask |= 1 << (i - lima_ip_pp0);
        }
 
-       bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
-       bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
-       return 0;
+       return lima_bcast_hw_init(ip);
 }
 
 void lima_bcast_fini(struct lima_ip *ip)
index c47e58563d0a208e539a4190a4e6a832e6ce6da8..465ee587bceb2f22b3552f8bb7b01338a74990bf 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_bcast_resume(struct lima_ip *ip);
+void lima_bcast_suspend(struct lima_ip *ip);
 int lima_bcast_init(struct lima_ip *ip);
 void lima_bcast_fini(struct lima_ip *ip);
 
index 22fff6caa961bf3fb5801d0d95975ec158a4a40e..891d5cd5019a7451200de317e9c9887b34d9377a 100644 (file)
@@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
        if (err < 0)
                goto err_out0;
 
+       ctx->pid = task_pid_nr(current);
+       get_task_comm(ctx->pname, current);
+
        return 0;
 
 err_out0:
index 6154e5c9bfe4971ca094b4fe91beb988839cbd55..74e2be09090f65ef24872c6c534ff0a419c17e88 100644 (file)
@@ -5,6 +5,7 @@
 #define __LIMA_CTX_H__
 
 #include <linux/xarray.h>
+#include <linux/sched.h>
 
 #include "lima_device.h"
 
@@ -13,6 +14,10 @@ struct lima_ctx {
        struct lima_device *dev;
        struct lima_sched_context context[lima_pipe_num];
        atomic_t guilty;
+
+       /* debug info */
+       char pname[TASK_COMM_LEN];
+       pid_t pid;
 };
 
 struct lima_ctx_mgr {
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
new file mode 100644 (file)
index 0000000..bbe0281
--- /dev/null
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on panfrost_devfreq.c:
+ *   Copyright 2019 Collabora ltd.
+ */
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+
+#include "lima_device.h"
+#include "lima_devfreq.h"
+
+static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
+{
+       ktime_t now, last;
+
+       now = ktime_get(); /* all callers hold devfreq->lock while calling this */
+       last = devfreq->time_last_update;
+
+       if (devfreq->busy_count > 0) /* attribute the elapsed interval to the state we were in */
+               devfreq->busy_time += ktime_sub(now, last);
+       else
+               devfreq->idle_time += ktime_sub(now, last);
+
+       devfreq->time_last_update = now;
+}
+
+static int lima_devfreq_target(struct device *dev, unsigned long *freq,
+                              u32 flags)
+{
+       struct dev_pm_opp *opp;
+       int err;
+
+       opp = devfreq_recommended_opp(dev, freq, flags); /* clamps *freq to a valid OPP */
+       if (IS_ERR(opp))
+               return PTR_ERR(opp);
+       dev_pm_opp_put(opp); /* only *freq is needed; drop the OPP reference immediately */
+
+       err = dev_pm_opp_set_rate(dev, *freq);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static void lima_devfreq_reset(struct lima_devfreq *devfreq)
+{
+       devfreq->busy_time = 0; /* callers hold devfreq->lock, except during init before devfreq is registered */
+       devfreq->idle_time = 0;
+       devfreq->time_last_update = ktime_get();
+}
+
+static int lima_devfreq_get_dev_status(struct device *dev,
+                                      struct devfreq_dev_status *status)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_devfreq *devfreq = &ldev->devfreq;
+       unsigned long irqflags;
+
+       status->current_frequency = clk_get_rate(ldev->clk_gpu);
+
+       spin_lock_irqsave(&devfreq->lock, irqflags); /* counters are also updated from GP/PP IRQ context */
+
+       lima_devfreq_update_utilization(devfreq); /* account the interval up to now before sampling */
+
+       status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
+                                                  devfreq->idle_time));
+       status->busy_time = ktime_to_ns(devfreq->busy_time);
+
+       lima_devfreq_reset(devfreq); /* start a fresh accounting window for the next poll */
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+       dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+               status->busy_time, status->total_time,
+               status->busy_time / (status->total_time / 100), /* NOTE(review): divides by zero if total_time < 100 ns — confirm */
+               status->current_frequency / 1000 / 1000);
+
+       return 0;
+}
+
+static struct devfreq_dev_profile lima_devfreq_profile = {
+       .polling_ms = 50, /* ~3 frames */
+       .target = lima_devfreq_target,
+       .get_dev_status = lima_devfreq_get_dev_status,
+};
+
+void lima_devfreq_fini(struct lima_device *ldev)
+{
+       struct lima_devfreq *devfreq = &ldev->devfreq;
+
+       if (devfreq->cooling) { /* each step is conditional + NULLed: also unwinds a partially-completed init */
+               devfreq_cooling_unregister(devfreq->cooling);
+               devfreq->cooling = NULL;
+       }
+
+       if (devfreq->devfreq) {
+               devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
+               devfreq->devfreq = NULL;
+       }
+
+       if (devfreq->opp_of_table_added) {
+               dev_pm_opp_of_remove_table(ldev->dev);
+               devfreq->opp_of_table_added = false;
+       }
+
+       if (devfreq->regulators_opp_table) {
+               dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+               devfreq->regulators_opp_table = NULL;
+       }
+
+       if (devfreq->clkname_opp_table) { /* teardown runs in reverse order of lima_devfreq_init() */
+               dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+               devfreq->clkname_opp_table = NULL;
+       }
+}
+
+int lima_devfreq_init(struct lima_device *ldev)
+{
+       struct thermal_cooling_device *cooling;
+       struct device *dev = ldev->dev;
+       struct opp_table *opp_table;
+       struct devfreq *devfreq;
+       struct lima_devfreq *ldevfreq = &ldev->devfreq;
+       struct dev_pm_opp *opp;
+       unsigned long cur_freq;
+       int ret;
+
+       if (!device_property_present(dev, "operating-points-v2"))
+               /* Optional, continue without devfreq */
+               return 0;
+
+       spin_lock_init(&ldevfreq->lock);
+
+       opp_table = dev_pm_opp_set_clkname(dev, "core"); /* "core" matches the clk consumer name used in lima_clk_init() */
+       if (IS_ERR(opp_table)) {
+               ret = PTR_ERR(opp_table);
+               goto err_fini;
+       }
+
+       ldevfreq->clkname_opp_table = opp_table;
+
+       opp_table = dev_pm_opp_set_regulators(dev,
+                                             (const char *[]){ "mali" },
+                                             1);
+       if (IS_ERR(opp_table)) {
+               ret = PTR_ERR(opp_table);
+
+               /* Continue if the optional regulator is missing */
+               if (ret != -ENODEV)
+                       goto err_fini;
+       } else {
+               ldevfreq->regulators_opp_table = opp_table;
+       }
+
+       ret = dev_pm_opp_of_add_table(dev);
+       if (ret)
+               goto err_fini;
+       ldevfreq->opp_of_table_added = true; /* remembered so fini only removes what was added */
+
+       lima_devfreq_reset(ldevfreq);
+
+       cur_freq = clk_get_rate(ldev->clk_gpu);
+
+       opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+       if (IS_ERR(opp)) {
+               ret = PTR_ERR(opp);
+               goto err_fini;
+       }
+
+       lima_devfreq_profile.initial_freq = cur_freq; /* report the (OPP-rounded) current clock rate as starting frequency */
+       dev_pm_opp_put(opp);
+
+       devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
+                                         DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+       if (IS_ERR(devfreq)) {
+               dev_err(dev, "Couldn't initialize GPU devfreq\n");
+               ret = PTR_ERR(devfreq);
+               goto err_fini;
+       }
+
+       ldevfreq->devfreq = devfreq;
+
+       cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+       if (IS_ERR(cooling)) /* cooling device is optional; continue without it */
+               dev_info(dev, "Failed to register cooling device\n");
+       else
+               ldevfreq->cooling = cooling;
+
+       return 0;
+
+err_fini:
+       lima_devfreq_fini(ldev); /* fini tolerates partial init: every resource it frees is guarded */
+       return ret;
+}
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq) /* devfreq not set up (no operating-points-v2 property) — nothing to account */
+               return;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_update_utilization(devfreq); /* account elapsed time under the previous busy/idle state */
+
+       devfreq->busy_count++;
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq)
+               return;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_update_utilization(devfreq);
+
+       WARN_ON(--devfreq->busy_count < 0); /* decrement is inside WARN_ON; warns on unbalanced busy/idle calls */
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq)
+               return 0;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_reset(devfreq); /* discard utilization accumulated across the suspend gap */
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+       return devfreq_resume_device(devfreq->devfreq);
+}
+
+int lima_devfreq_suspend(struct lima_devfreq *devfreq)
+{
+       if (!devfreq->devfreq)
+               return 0;
+
+       return devfreq_suspend_device(devfreq->devfreq); /* stops polling; counters are reset on resume */
+}
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
new file mode 100644 (file)
index 0000000..5eed297
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */
+
+#ifndef __LIMA_DEVFREQ_H__
+#define __LIMA_DEVFREQ_H__
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct lima_device;
+
+struct lima_devfreq {
+       struct devfreq *devfreq;
+       struct opp_table *clkname_opp_table;
+       struct opp_table *regulators_opp_table;
+       struct thermal_cooling_device *cooling;
+       bool opp_of_table_added;
+
+       ktime_t busy_time;
+       ktime_t idle_time;
+       ktime_t time_last_update;
+       int busy_count;
+       /*
+        * Protect busy_time, idle_time, time_last_update and busy_count
+        * because these can be updated concurrently, for example by the GP
+        * and PP interrupts.
+        */
+       spinlock_t lock;
+};
+
+int lima_devfreq_init(struct lima_device *ldev);
+void lima_devfreq_fini(struct lima_device *ldev);
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq);
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq);
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq);
+int lima_devfreq_suspend(struct lima_devfreq *devfreq);
+
+#endif
index 19829b5430242f64d3fb88f6eb88548da9e5faf9..65fdca366e41f00d940f13a0865234fc47df5595 100644 (file)
@@ -25,6 +25,8 @@ struct lima_ip_desc {
 
        int (*init)(struct lima_ip *ip);
        void (*fini)(struct lima_ip *ip);
+       int (*resume)(struct lima_ip *ip);
+       void (*suspend)(struct lima_ip *ip);
 };
 
 #define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
@@ -41,6 +43,8 @@ struct lima_ip_desc {
                }, \
                .init = lima_##func##_init, \
                .fini = lima_##func##_fini, \
+               .resume = lima_##func##_resume, \
+               .suspend = lima_##func##_suspend, \
        }
 
 static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
@@ -77,26 +81,10 @@ const char *lima_ip_name(struct lima_ip *ip)
        return lima_ip_desc[ip->id].name;
 }
 
-static int lima_clk_init(struct lima_device *dev)
+static int lima_clk_enable(struct lima_device *dev)
 {
        int err;
 
-       dev->clk_bus = devm_clk_get(dev->dev, "bus");
-       if (IS_ERR(dev->clk_bus)) {
-               err = PTR_ERR(dev->clk_bus);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get bus clk failed %d\n", err);
-               return err;
-       }
-
-       dev->clk_gpu = devm_clk_get(dev->dev, "core");
-       if (IS_ERR(dev->clk_gpu)) {
-               err = PTR_ERR(dev->clk_gpu);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get core clk failed %d\n", err);
-               return err;
-       }
-
        err = clk_prepare_enable(dev->clk_bus);
        if (err)
                return err;
@@ -105,15 +93,7 @@ static int lima_clk_init(struct lima_device *dev)
        if (err)
                goto error_out0;
 
-       dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
-
-       if (IS_ERR(dev->reset)) {
-               err = PTR_ERR(dev->reset);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get reset controller failed %d\n",
-                               err);
-               goto error_out1;
-       } else if (dev->reset != NULL) {
+       if (dev->reset) {
                err = reset_control_deassert(dev->reset);
                if (err) {
                        dev_err(dev->dev,
@@ -131,14 +111,76 @@ error_out0:
        return err;
 }
 
-static void lima_clk_fini(struct lima_device *dev)
+static void lima_clk_disable(struct lima_device *dev)
 {
-       if (dev->reset != NULL)
+       if (dev->reset)
                reset_control_assert(dev->reset);
        clk_disable_unprepare(dev->clk_gpu);
        clk_disable_unprepare(dev->clk_bus);
 }
 
+static int lima_clk_init(struct lima_device *dev)
+{
+       int err;
+
+       dev->clk_bus = devm_clk_get(dev->dev, "bus");
+       if (IS_ERR(dev->clk_bus)) {
+               err = PTR_ERR(dev->clk_bus);
+               if (err != -EPROBE_DEFER) /* defer is expected during boot; don't spam the log */
+                       dev_err(dev->dev, "get bus clk failed %d\n", err);
+               dev->clk_bus = NULL; /* clear the ERR_PTR so teardown paths see NULL */
+               return err;
+       }
+
+       dev->clk_gpu = devm_clk_get(dev->dev, "core");
+       if (IS_ERR(dev->clk_gpu)) {
+               err = PTR_ERR(dev->clk_gpu);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev->dev, "get core clk failed %d\n", err);
+               dev->clk_gpu = NULL;
+               return err;
+       }
+
+       dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+       if (IS_ERR(dev->reset)) {
+               err = PTR_ERR(dev->reset);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev->dev, "get reset controller failed %d\n",
+                               err);
+               dev->reset = NULL; /* reset is optional; NULL means "none" to lima_clk_enable/disable */
+               return err;
+       }
+
+       return lima_clk_enable(dev);
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+       lima_clk_disable(dev); /* clk/reset handles are devm-managed; only the enable state needs undoing */
+}
+
+static int lima_regulator_enable(struct lima_device *dev)
+{
+       int ret;
+
+       if (!dev->regulator) /* no regulator configured — nothing to do */
+               return 0;
+
+       ret = regulator_enable(dev->regulator);
+       if (ret < 0) {
+               dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void lima_regulator_disable(struct lima_device *dev)
+{
+       if (dev->regulator)
+               regulator_disable(dev->regulator);
+}
+
 static int lima_regulator_init(struct lima_device *dev)
 {
        int ret;
@@ -154,25 +196,20 @@ static int lima_regulator_init(struct lima_device *dev)
                return ret;
        }
 
-       ret = regulator_enable(dev->regulator);
-       if (ret < 0) {
-               dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
+       return lima_regulator_enable(dev);
 }
 
 static void lima_regulator_fini(struct lima_device *dev)
 {
-       if (dev->regulator)
-               regulator_disable(dev->regulator);
+       lima_regulator_disable(dev);
 }
 
 static int lima_init_ip(struct lima_device *dev, int index)
 {
+       struct platform_device *pdev = to_platform_device(dev->dev);
        struct lima_ip_desc *desc = lima_ip_desc + index;
        struct lima_ip *ip = dev->ip + index;
+       const char *irq_name = desc->irq_name;
        int offset = desc->offset[dev->id];
        bool must = desc->must_have[dev->id];
        int err;
@@ -183,8 +220,9 @@ static int lima_init_ip(struct lima_device *dev, int index)
        ip->dev = dev;
        ip->id = index;
        ip->iomem = dev->iomem + offset;
-       if (desc->irq_name) {
-               err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+       if (irq_name) {
+               err = must ? platform_get_irq_byname(pdev, irq_name) :
+                            platform_get_irq_byname_optional(pdev, irq_name);
                if (err < 0)
                        goto out;
                ip->irq = err;
@@ -209,11 +247,34 @@ static void lima_fini_ip(struct lima_device *ldev, int index)
                desc->fini(ip);
 }
 
+static int lima_resume_ip(struct lima_device *ldev, int index)
+{
+       struct lima_ip_desc *desc = lima_ip_desc + index;
+       struct lima_ip *ip = ldev->ip + index;
+       int ret = 0;
+
+       if (ip->present) /* only touch IPs actually probed on this SoC */
+               ret = desc->resume(ip);
+
+       return ret;
+}
+
+static void lima_suspend_ip(struct lima_device *ldev, int index)
+{
+       struct lima_ip_desc *desc = lima_ip_desc + index;
+       struct lima_ip *ip = ldev->ip + index;
+
+       if (ip->present)
+               desc->suspend(ip);
+}
+
 static int lima_init_gp_pipe(struct lima_device *dev)
 {
        struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
        int err;
 
+       pipe->ldev = dev;
+
        err = lima_sched_pipe_init(pipe, "gp");
        if (err)
                return err;
@@ -244,6 +305,8 @@ static int lima_init_pp_pipe(struct lima_device *dev)
        struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
        int err, i;
 
+       pipe->ldev = dev;
+
        err = lima_sched_pipe_init(pipe, "pp");
        if (err)
                return err;
@@ -290,8 +353,8 @@ static void lima_fini_pp_pipe(struct lima_device *dev)
 
 int lima_device_init(struct lima_device *ldev)
 {
+       struct platform_device *pdev = to_platform_device(ldev->dev);
        int err, i;
-       struct resource *res;
 
        dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
 
@@ -322,8 +385,7 @@ int lima_device_init(struct lima_device *ldev)
        } else
                ldev->va_end = LIMA_VA_RESERVE_END;
 
-       res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
-       ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+       ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ldev->iomem)) {
                dev_err(ldev->dev, "fail to ioremap iomem\n");
                err = PTR_ERR(ldev->iomem);
@@ -344,6 +406,12 @@ int lima_device_init(struct lima_device *ldev)
        if (err)
                goto err_out5;
 
+       ldev->dump.magic = LIMA_DUMP_MAGIC;
+       ldev->dump.version_major = LIMA_DUMP_MAJOR;
+       ldev->dump.version_minor = LIMA_DUMP_MINOR;
+       INIT_LIST_HEAD(&ldev->error_task_list);
+       mutex_init(&ldev->error_task_list_lock);
+
        dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
        dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
 
@@ -370,6 +438,13 @@ err_out0:
 void lima_device_fini(struct lima_device *ldev)
 {
        int i;
+       struct lima_sched_error_task *et, *tmp;
+
+       list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+               list_del(&et->list);
+               kvfree(et);
+       }
+       mutex_destroy(&ldev->error_task_list_lock);
 
        lima_fini_pp_pipe(ldev);
        lima_fini_gp_pipe(ldev);
@@ -387,3 +462,72 @@ void lima_device_fini(struct lima_device *ldev)
 
        lima_clk_fini(ldev);
 }
+
+int lima_device_resume(struct device *dev)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       int i, err;
+
+       err = lima_clk_enable(ldev);
+       if (err) {
+               dev_err(dev, "resume clk fail %d\n", err);
+               return err;
+       }
+
+       err = lima_regulator_enable(ldev);
+       if (err) {
+               dev_err(dev, "resume regulator fail %d\n", err);
+               goto err_out0;
+       }
+
+       for (i = 0; i < lima_ip_num; i++) {
+               err = lima_resume_ip(ldev, i);
+               if (err) {
+                       dev_err(dev, "resume ip %d fail\n", i);
+                       goto err_out1;
+               }
+       }
+
+       err = lima_devfreq_resume(&ldev->devfreq);
+       if (err) {
+               dev_err(dev, "devfreq resume fail\n");
+               goto err_out1;
+       }
+
+       return 0;
+
+err_out1:
+       while (--i >= 0)
+               lima_suspend_ip(ldev, i);
+       lima_regulator_disable(ldev);
+err_out0:
+       lima_clk_disable(ldev);
+       return err;
+}
+
+int lima_device_suspend(struct device *dev)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       int i, err;
+
+       /* check any task running */
+       for (i = 0; i < lima_pipe_num; i++) {
+               if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+                       return -EBUSY;
+       }
+
+       err = lima_devfreq_suspend(&ldev->devfreq);
+       if (err) {
+               dev_err(dev, "devfreq suspend fail\n");
+               return err;
+       }
+
+       for (i = lima_ip_num - 1; i >= 0; i--)
+               lima_suspend_ip(ldev, i);
+
+       lima_regulator_disable(ldev);
+
+       lima_clk_disable(ldev);
+
+       return 0;
+}
index 31158d86271c2b326b0c9b9e7a8917235e6b80ea..41b9d7b4bcc7a0128adda97379b6048735310096 100644 (file)
@@ -6,8 +6,12 @@
 
 #include <drm/drm_device.h>
 #include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 
 #include "lima_sched.h"
+#include "lima_dump.h"
+#include "lima_devfreq.h"
 
 enum lima_gpu_id {
        lima_gpu_mali400 = 0,
@@ -60,6 +64,8 @@ struct lima_ip {
                bool async_reset;
                /* l2 cache */
                spinlock_t lock;
+               /* pmu/bcast */
+               u32 mask;
        } data;
 };
 
@@ -72,7 +78,6 @@ enum lima_pipe_id {
 struct lima_device {
        struct device *dev;
        struct drm_device *ddev;
-       struct platform_device *pdev;
 
        enum lima_gpu_id id;
        u32 gp_version;
@@ -94,6 +99,13 @@ struct lima_device {
 
        u32 *dlbu_cpu;
        dma_addr_t dlbu_dma;
+
+       struct lima_devfreq devfreq;
+
+       /* debug info */
+       struct lima_dump_head dump;
+       struct list_head error_task_list;
+       struct mutex error_task_list_lock;
 };
 
 static inline struct lima_device *
@@ -128,4 +140,7 @@ static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
        return 0;
 }
 
+int lima_device_suspend(struct device *dev);
+int lima_device_resume(struct device *dev);
+
 #endif
index 8399ceffb94bb8540ee4640ac4114bd652336eea..c1d5ea35daa7aecbea830adf7f946bc644c5166d 100644 (file)
@@ -42,7 +42,7 @@ void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
        dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
 }
 
-int lima_dlbu_init(struct lima_ip *ip)
+static int lima_dlbu_hw_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
 
@@ -52,6 +52,21 @@ int lima_dlbu_init(struct lima_ip *ip)
        return 0;
 }
 
+int lima_dlbu_resume(struct lima_ip *ip)
+{
+       return lima_dlbu_hw_init(ip);
+}
+
+void lima_dlbu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+       return lima_dlbu_hw_init(ip);
+}
+
 void lima_dlbu_fini(struct lima_ip *ip)
 {
 
index 16f877984466d4eb7849e7b076e8a62075862517..be71daaaee892f46a777995da6bbf387cf657321 100644 (file)
@@ -12,6 +12,8 @@ void lima_dlbu_disable(struct lima_device *dev);
 
 void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
 
+int lima_dlbu_resume(struct lima_ip *ip);
+void lima_dlbu_suspend(struct lima_ip *ip);
 int lima_dlbu_init(struct lima_ip *ip);
 void lima_dlbu_fini(struct lima_ip *ip);
 
index 2daac64d8955c6d43d283fe14338d84db33bbed2..a831565af81345a9f8959b01704fadd7c17aa5ec 100644 (file)
@@ -5,17 +5,20 @@
 #include <linux/of_platform.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_prime.h>
 #include <drm/lima_drm.h>
 
+#include "lima_device.h"
 #include "lima_drv.h"
 #include "lima_gem.h"
 #include "lima_vm.h"
 
 int lima_sched_timeout_ms;
 uint lima_heap_init_nr_pages = 8;
+uint lima_max_error_tasks;
 
 MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
 module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
@@ -23,6 +26,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
 MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
 module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
 
+MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
+module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);
+
 static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
 {
        struct drm_lima_get_param *args = data;
@@ -272,6 +278,93 @@ static struct drm_driver lima_drm_driver = {
        .gem_prime_mmap = drm_gem_prime_mmap,
 };
 
+struct lima_block_reader {
+       void *dst;
+       size_t base;
+       size_t count;
+       size_t off;
+       ssize_t read;
+};
+
+static bool lima_read_block(struct lima_block_reader *reader,
+                           void *src, size_t src_size)
+{
+       size_t max_off = reader->base + src_size;
+
+       if (reader->off < max_off) {
+               size_t size = min_t(size_t, max_off - reader->off,
+                                   reader->count);
+
+               memcpy(reader->dst, src + (reader->off - reader->base), size);
+
+               reader->dst += size;
+               reader->off += size;
+               reader->read += size;
+               reader->count -= size;
+       }
+
+       reader->base = max_off;
+
+       return !!reader->count;
+}
+
+static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t off, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_sched_error_task *et;
+       struct lima_block_reader reader = {
+               .dst = buf,
+               .count = count,
+               .off = off,
+       };
+
+       mutex_lock(&ldev->error_task_list_lock);
+
+       if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
+               list_for_each_entry(et, &ldev->error_task_list, list) {
+                       if (!lima_read_block(&reader, et->data, et->size))
+                               break;
+               }
+       }
+
+       mutex_unlock(&ldev->error_task_list_lock);
+       return reader.read;
+}
+
+static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t off, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_sched_error_task *et, *tmp;
+
+       mutex_lock(&ldev->error_task_list_lock);
+
+       list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+               list_del(&et->list);
+               kvfree(et);
+       }
+
+       ldev->dump.size = 0;
+       ldev->dump.num_tasks = 0;
+
+       mutex_unlock(&ldev->error_task_list_lock);
+
+       return count;
+}
+
+static const struct bin_attribute lima_error_state_attr = {
+       .attr.name = "error",
+       .attr.mode = 0600,
+       .size = 0,
+       .read = lima_error_state_read,
+       .write = lima_error_state_write,
+};
+
 static int lima_pdev_probe(struct platform_device *pdev)
 {
        struct lima_device *ldev;
@@ -288,7 +381,6 @@ static int lima_pdev_probe(struct platform_device *pdev)
                goto err_out0;
        }
 
-       ldev->pdev = pdev;
        ldev->dev = &pdev->dev;
        ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
 
@@ -306,16 +398,34 @@ static int lima_pdev_probe(struct platform_device *pdev)
        if (err)
                goto err_out1;
 
+       err = lima_devfreq_init(ldev);
+       if (err) {
+               dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+               goto err_out2;
+       }
+
+       pm_runtime_set_active(ldev->dev);
+       pm_runtime_mark_last_busy(ldev->dev);
+       pm_runtime_set_autosuspend_delay(ldev->dev, 200);
+       pm_runtime_use_autosuspend(ldev->dev);
+       pm_runtime_enable(ldev->dev);
+
        /*
         * Register the DRM device with the core and the connectors with
         * sysfs.
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
-               goto err_out2;
+               goto err_out3;
+
+       if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
+               dev_warn(ldev->dev, "fail to create error state sysfs\n");
 
        return 0;
 
+err_out3:
+       pm_runtime_disable(ldev->dev);
+       lima_devfreq_fini(ldev);
 err_out2:
        lima_device_fini(ldev);
 err_out1:
@@ -330,8 +440,17 @@ static int lima_pdev_remove(struct platform_device *pdev)
        struct lima_device *ldev = platform_get_drvdata(pdev);
        struct drm_device *ddev = ldev->ddev;
 
+       sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);
+
        drm_dev_unregister(ddev);
+
+       /* stop autosuspend to make sure device is in active state */
+       pm_runtime_set_autosuspend_delay(ldev->dev, -1);
+       pm_runtime_disable(ldev->dev);
+
+       lima_devfreq_fini(ldev);
        lima_device_fini(ldev);
+
        drm_dev_put(ddev);
        lima_sched_slab_fini();
        return 0;
@@ -344,26 +463,22 @@ static const struct of_device_id dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, dt_match);
 
+static const struct dev_pm_ops lima_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
+};
+
 static struct platform_driver lima_platform_driver = {
        .probe      = lima_pdev_probe,
        .remove     = lima_pdev_remove,
        .driver     = {
                .name   = "lima",
+               .pm     = &lima_pm_ops,
                .of_match_table = dt_match,
        },
 };
 
-static int __init lima_init(void)
-{
-       return platform_driver_register(&lima_platform_driver);
-}
-module_init(lima_init);
-
-static void __exit lima_exit(void)
-{
-       platform_driver_unregister(&lima_platform_driver);
-}
-module_exit(lima_exit);
+module_platform_driver(lima_platform_driver);
 
 MODULE_AUTHOR("Lima Project Developers");
 MODULE_DESCRIPTION("Lima DRM Driver");
index f492ecc6a5d9e127bb5b752d8b5ab3f33b455e6b..fdbd4077c768ded9fbb56114558d5aded58b898a 100644 (file)
@@ -10,6 +10,7 @@
 
 extern int lima_sched_timeout_ms;
 extern uint lima_heap_init_nr_pages;
+extern uint lima_max_error_tasks;
 
 struct lima_vm;
 struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h
new file mode 100644 (file)
index 0000000..ca243d9
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DUMP_H__
+#define __LIMA_DUMP_H__
+
+#include <linux/types.h>
+
+/**
+ * dump file format for all the information to start a lima task
+ *
+ * top level format
+ * | magic code "LIMA" | format version | num tasks | data size |
+ * | reserved | reserved | reserved | reserved |
+ * | task 1 ID | task 1 size | num chunks | reserved | task 1 data |
+ * | task 2 ID | task 2 size | num chunks | reserved | task 2 data |
+ * ...
+ *
+ * task data format
+ * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data |
+ * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data |
+ * ...
+ *
+ */
+
+#define LIMA_DUMP_MAJOR 1
+#define LIMA_DUMP_MINOR 0
+
+#define LIMA_DUMP_MAGIC 0x414d494c
+
+struct lima_dump_head {
+       __u32 magic;
+       __u16 version_major;
+       __u16 version_minor;
+       __u32 num_tasks;
+       __u32 size;
+       __u32 reserved[4];
+};
+
+#define LIMA_DUMP_TASK_GP   0
+#define LIMA_DUMP_TASK_PP   1
+#define LIMA_DUMP_TASK_NUM  2
+
+struct lima_dump_task {
+       __u32 id;
+       __u32 size;
+       __u32 num_chunks;
+       __u32 reserved;
+};
+
+#define LIMA_DUMP_CHUNK_FRAME         0
+#define LIMA_DUMP_CHUNK_BUFFER        1
+#define LIMA_DUMP_CHUNK_PROCESS_NAME  2
+#define LIMA_DUMP_CHUNK_PROCESS_ID    3
+#define LIMA_DUMP_CHUNK_NUM           4
+
+struct lima_dump_chunk {
+       __u32 id;
+       __u32 size;
+       __u32 reserved[2];
+};
+
+struct lima_dump_chunk_buffer {
+       __u32 id;
+       __u32 size;
+       __u32 va;
+       __u32 reserved;
+};
+
+struct lima_dump_chunk_pid {
+       __u32 id;
+       __u32 size;
+       __u32 pid;
+       __u32 reserved;
+};
+
+#endif
index d8841c870d906ab6dcc8269b23a8f13ddb14ee67..8dd501b7a3d0d8bd3656429c2508e0f937f66e04 100644 (file)
@@ -274,6 +274,23 @@ static void lima_gp_print_version(struct lima_ip *ip)
 static struct kmem_cache *lima_gp_task_slab;
 static int lima_gp_task_slab_refcnt;
 
+static int lima_gp_hw_init(struct lima_ip *ip)
+{
+       ip->data.async_reset = false;
+       lima_gp_soft_reset_async(ip);
+       return lima_gp_soft_reset_async_wait(ip);
+}
+
+int lima_gp_resume(struct lima_ip *ip)
+{
+       return lima_gp_hw_init(ip);
+}
+
+void lima_gp_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_gp_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
@@ -281,9 +298,7 @@ int lima_gp_init(struct lima_ip *ip)
 
        lima_gp_print_version(ip);
 
-       ip->data.async_reset = false;
-       lima_gp_soft_reset_async(ip);
-       err = lima_gp_soft_reset_async_wait(ip);
+       err = lima_gp_hw_init(ip);
        if (err)
                return err;
 
index 516e5c1babbb45643bbfd243391fae3b2ab24d0b..02ec9af78a518e8fad9242b754c446d62bb6549f 100644 (file)
@@ -7,6 +7,8 @@
 struct lima_ip;
 struct lima_device;
 
+int lima_gp_resume(struct lima_ip *ip);
+void lima_gp_suspend(struct lima_ip *ip);
 int lima_gp_init(struct lima_ip *ip);
 void lima_gp_fini(struct lima_ip *ip);
 
index 6873a7af5a5ce7a6690079d1900e94c43a7fa01a..c4080a02957ba808307d31190babae70ed116aa1 100644 (file)
@@ -38,9 +38,35 @@ int lima_l2_cache_flush(struct lima_ip *ip)
        return ret;
 }
 
+static int lima_l2_cache_hw_init(struct lima_ip *ip)
+{
+       int err;
+
+       err = lima_l2_cache_flush(ip);
+       if (err)
+               return err;
+
+       l2_cache_write(LIMA_L2_CACHE_ENABLE,
+                      LIMA_L2_CACHE_ENABLE_ACCESS |
+                      LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+       l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+       return 0;
+}
+
+int lima_l2_cache_resume(struct lima_ip *ip)
+{
+       return lima_l2_cache_hw_init(ip);
+}
+
+void lima_l2_cache_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_l2_cache_init(struct lima_ip *ip)
 {
-       int i, err;
+       int i;
        u32 size;
        struct lima_device *dev = ip->dev;
 
@@ -63,15 +89,7 @@ int lima_l2_cache_init(struct lima_ip *ip)
                 1 << (size & 0xff),
                 1 << ((size >> 24) & 0xff));
 
-       err = lima_l2_cache_flush(ip);
-       if (err)
-               return err;
-
-       l2_cache_write(LIMA_L2_CACHE_ENABLE,
-                      LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
-       l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
-
-       return 0;
+       return lima_l2_cache_hw_init(ip);
 }
 
 void lima_l2_cache_fini(struct lima_ip *ip)
index c63fb676ff1412e9163da5a4a46bb2f961758709..1aeeefd53fb9ecd55d253e2489d506f930f02f83 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_l2_cache_resume(struct lima_ip *ip);
+void lima_l2_cache_suspend(struct lima_ip *ip);
 int lima_l2_cache_init(struct lima_ip *ip);
 void lima_l2_cache_fini(struct lima_ip *ip);
 
index f79d2af427e77cc173b6377118d5176b53f88360..a1ae6c252dc2b546bad5f12226cad197ff006808 100644 (file)
@@ -59,12 +59,44 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int lima_mmu_init(struct lima_ip *ip)
+static int lima_mmu_hw_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
        int err;
        u32 v;
 
+       mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+       err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+                                   LIMA_MMU_DTE_ADDR, v, v == 0);
+       if (err)
+               return err;
+
+       mmu_write(LIMA_MMU_INT_MASK,
+                 LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+       mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+       return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+                                    LIMA_MMU_STATUS, v,
+                                    v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+int lima_mmu_resume(struct lima_ip *ip)
+{
+       if (ip->id == lima_ip_ppmmu_bcast)
+               return 0;
+
+       return lima_mmu_hw_init(ip);
+}
+
+void lima_mmu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+       struct lima_device *dev = ip->dev;
+       int err;
+
        if (ip->id == lima_ip_ppmmu_bcast)
                return 0;
 
@@ -74,12 +106,6 @@ int lima_mmu_init(struct lima_ip *ip)
                return -EIO;
        }
 
-       mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
-       err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
-                                   LIMA_MMU_DTE_ADDR, v, v == 0);
-       if (err)
-               return err;
-
        err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
                               IRQF_SHARED, lima_ip_name(ip), ip);
        if (err) {
@@ -87,11 +113,7 @@ int lima_mmu_init(struct lima_ip *ip)
                return err;
        }
 
-       mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
-       mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
-       return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
-                                    LIMA_MMU_STATUS, v,
-                                    v & LIMA_MMU_STATUS_PAGING_ENABLED);
+       return lima_mmu_hw_init(ip);
 }
 
 void lima_mmu_fini(struct lima_ip *ip)
@@ -113,8 +135,7 @@ void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
                              LIMA_MMU_STATUS, v,
                              v & LIMA_MMU_STATUS_STALL_ACTIVE);
 
-       if (vm)
-               mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+       mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
 
        /* flush the TLB */
        mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
index 4f8ccbebcba147ff023f5f942c7c451caf0658dd..f0c97ac75ea0100b31c81953321a3d99e873800a 100644 (file)
@@ -7,6 +7,8 @@
 struct lima_ip;
 struct lima_vm;
 
+int lima_mmu_resume(struct lima_ip *ip);
+void lima_mmu_suspend(struct lima_ip *ip);
 int lima_mmu_init(struct lima_ip *ip);
 void lima_mmu_fini(struct lima_ip *ip);
 
index 571f6d66158182a369d850da4dfac6a3801407fe..e397e1146e96314808b4964576fc419988b11053 100644 (file)
@@ -21,7 +21,7 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
                                 v, v & LIMA_PMU_INT_CMD_MASK,
                                 100, 100000);
        if (err) {
-               dev_err(dev->dev, "timeout wait pmd cmd\n");
+               dev_err(dev->dev, "timeout wait pmu cmd\n");
                return err;
        }
 
@@ -29,7 +29,41 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
        return 0;
 }
 
-int lima_pmu_init(struct lima_ip *ip)
+static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
+{
+       struct lima_device *dev = ip->dev;
+       u32 ret = 0;
+       int i;
+
+       ret |= LIMA_PMU_POWER_GP0_MASK;
+
+       if (dev->id == lima_gpu_mali400) {
+               ret |= LIMA_PMU_POWER_L2_MASK;
+               for (i = 0; i < 4; i++) {
+                       if (dev->ip[lima_ip_pp0 + i].present)
+                               ret |= LIMA_PMU_POWER_PP_MASK(i);
+               }
+       } else {
+               if (dev->ip[lima_ip_pp0].present)
+                       ret |= LIMA450_PMU_POWER_PP0_MASK;
+               for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
+                       if (dev->ip[i].present) {
+                               ret |= LIMA450_PMU_POWER_PP13_MASK;
+                               break;
+                       }
+               }
+               for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+                       if (dev->ip[i].present) {
+                               ret |= LIMA450_PMU_POWER_PP47_MASK;
+                               break;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int lima_pmu_hw_init(struct lima_ip *ip)
 {
        int err;
        u32 stat;
@@ -54,7 +88,44 @@ int lima_pmu_init(struct lima_ip *ip)
        return 0;
 }
 
-void lima_pmu_fini(struct lima_ip *ip)
+static void lima_pmu_hw_fini(struct lima_ip *ip)
 {
+       u32 stat;
+
+       if (!ip->data.mask)
+               ip->data.mask = lima_pmu_get_ip_mask(ip);
 
+       stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
+       if (stat) {
+               pmu_write(LIMA_PMU_POWER_DOWN, stat);
+
+               /* Don't wait for interrupt on Mali400 if all domains are
+                * powered off because the HW won't generate an interrupt
+                * in this case.
+                */
+               if (ip->dev->id == lima_gpu_mali400)
+                       pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+               else
+                       lima_pmu_wait_cmd(ip);
+       }
+}
+
+int lima_pmu_resume(struct lima_ip *ip)
+{
+       return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_suspend(struct lima_ip *ip)
+{
+       lima_pmu_hw_fini(ip);
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+       return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+       lima_pmu_hw_fini(ip);
 }
index a2a18775eb07ddcf833723f95d0443fccfa0e1a1..652dc7af30473fff55c8b709083f6bf13907b996 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_pmu_resume(struct lima_ip *ip);
+void lima_pmu_suspend(struct lima_ip *ip);
 int lima_pmu_init(struct lima_ip *ip);
 void lima_pmu_fini(struct lima_ip *ip);
 
index 8fef224b93c8568fc70bd483739a7cd84d5e49e6..33f01383409c0e80f8f0fd8805ce06f35cb02a77 100644 (file)
@@ -223,6 +223,23 @@ static void lima_pp_print_version(struct lima_ip *ip)
                 lima_ip_name(ip), name, major, minor);
 }
 
+static int lima_pp_hw_init(struct lima_ip *ip)
+{
+       ip->data.async_reset = false;
+       lima_pp_soft_reset_async(ip);
+       return lima_pp_soft_reset_async_wait(ip);
+}
+
+int lima_pp_resume(struct lima_ip *ip)
+{
+       return lima_pp_hw_init(ip);
+}
+
+void lima_pp_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_pp_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
@@ -230,9 +247,7 @@ int lima_pp_init(struct lima_ip *ip)
 
        lima_pp_print_version(ip);
 
-       ip->data.async_reset = false;
-       lima_pp_soft_reset_async(ip);
-       err = lima_pp_soft_reset_async_wait(ip);
+       err = lima_pp_hw_init(ip);
        if (err)
                return err;
 
@@ -254,6 +269,16 @@ void lima_pp_fini(struct lima_ip *ip)
 
 }
 
+int lima_pp_bcast_resume(struct lima_ip *ip)
+{
+       return 0;
+}
+
+void lima_pp_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_pp_bcast_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
index bf60c77b26338f1f181d1fa95437111fe9476f18..16ec96de15a982d6c4f5b4343f6d4a4fcb4a3efe 100644 (file)
@@ -7,9 +7,13 @@
 struct lima_ip;
 struct lima_device;
 
+int lima_pp_resume(struct lima_ip *ip);
+void lima_pp_suspend(struct lima_ip *ip);
 int lima_pp_init(struct lima_ip *ip);
 void lima_pp_fini(struct lima_ip *ip);
 
+int lima_pp_bcast_resume(struct lima_ip *ip);
+void lima_pp_bcast_suspend(struct lima_ip *ip);
 int lima_pp_bcast_init(struct lima_ip *ip);
 void lima_pp_bcast_fini(struct lima_ip *ip);
 
index 3886999b453303a58b5b2935d92f30f10bab4ee7..e6cefda0027959d169d059ecb99d959945792a17 100644 (file)
@@ -3,14 +3,17 @@
 
 #include <linux/kthread.h>
 #include <linux/slab.h>
-#include <linux/xarray.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
 
+#include "lima_devfreq.h"
 #include "lima_drv.h"
 #include "lima_sched.h"
 #include "lima_vm.h"
 #include "lima_mmu.h"
 #include "lima_l2_cache.h"
 #include "lima_gem.h"
+#include "lima_trace.h"
 
 struct lima_fence {
        struct dma_fence base;
@@ -176,6 +179,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte
 {
        struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
 
+       trace_lima_task_submit(task);
        drm_sched_entity_push_job(&task->base, &context->base);
        return fence;
 }
@@ -191,14 +195,36 @@ static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
        return NULL;
 }
 
+static int lima_pm_busy(struct lima_device *ldev)
+{
+       int ret;
+
+       /* resume GPU if it has been suspended by runtime PM */
+       ret = pm_runtime_get_sync(ldev->dev);
+       if (ret < 0)
+               return ret;
+
+       lima_devfreq_record_busy(&ldev->devfreq);
+       return 0;
+}
+
+static void lima_pm_idle(struct lima_device *ldev)
+{
+       lima_devfreq_record_idle(&ldev->devfreq);
+
+       /* GPU can do auto runtime suspend */
+       pm_runtime_mark_last_busy(ldev->dev);
+       pm_runtime_put_autosuspend(ldev->dev);
+}
+
 static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
 {
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+       struct lima_device *ldev = pipe->ldev;
        struct lima_fence *fence;
        struct dma_fence *ret;
-       struct lima_vm *vm = NULL, *last_vm = NULL;
-       int i;
+       int i, err;
 
        /* after GPU reset */
        if (job->s_fence->finished.error < 0)
@@ -207,6 +233,13 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        fence = lima_fence_create(pipe);
        if (!fence)
                return NULL;
+
+       err = lima_pm_busy(ldev);
+       if (err < 0) {
+               dma_fence_put(&fence->base);
+               return NULL;
+       }
+
        task->fence = &fence->base;
 
        /* for caller usage of the fence, otherwise irq handler
@@ -234,21 +267,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);
 
-       if (task->vm != pipe->current_vm) {
-               vm = lima_vm_get(task->vm);
-               last_vm = pipe->current_vm;
-               pipe->current_vm = task->vm;
-       }
+       lima_vm_put(pipe->current_vm);
+       pipe->current_vm = lima_vm_get(task->vm);
 
        if (pipe->bcast_mmu)
-               lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+               lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
        else {
                for (i = 0; i < pipe->num_mmu; i++)
-                       lima_mmu_switch_vm(pipe->mmu[i], vm);
+                       lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
        }
 
-       if (last_vm)
-               lima_vm_put(last_vm);
+       trace_lima_task_run(task);
 
        pipe->error = false;
        pipe->task_run(pipe, task);
@@ -256,10 +285,139 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        return task->fence;
 }
 
+static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+{
+       struct lima_sched_error_task *et;
+       struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
+       struct lima_ip *ip = pipe->processor[0];
+       int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
+       struct lima_device *dev = ip->dev;
+       struct lima_sched_context *sched_ctx =
+               container_of(task->base.entity,
+                            struct lima_sched_context, base);
+       struct lima_ctx *ctx =
+               container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
+       struct lima_dump_task *dt;
+       struct lima_dump_chunk *chunk;
+       struct lima_dump_chunk_pid *pid_chunk;
+       struct lima_dump_chunk_buffer *buffer_chunk;
+       u32 size, task_size, mem_size;
+       int i;
+
+       mutex_lock(&dev->error_task_list_lock);
+
+       if (dev->dump.num_tasks >= lima_max_error_tasks) {
+               dev_info(dev->dev, "fail to save task state from %s pid %d: "
+                        "error task list is full\n", ctx->pname, ctx->pid);
+               goto out;
+       }
+
+       /* frame chunk */
+       size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
+       /* process name chunk */
+       size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
+       /* pid chunk */
+       size += sizeof(struct lima_dump_chunk);
+       /* buffer chunks */
+       for (i = 0; i < task->num_bos; i++) {
+               struct lima_bo *bo = task->bos[i];
+
+               size += sizeof(struct lima_dump_chunk);
+               size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
+       }
+
+       task_size = size + sizeof(struct lima_dump_task);
+       mem_size = task_size + sizeof(*et);
+       et = kvmalloc(mem_size, GFP_KERNEL);
+       if (!et) {
+               dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
+                       mem_size);
+               goto out;
+       }
+
+       et->data = et + 1;
+       et->size = task_size;
+
+       dt = et->data;
+       memset(dt, 0, sizeof(*dt));
+       dt->id = pipe_id;
+       dt->size = size;
+
+       chunk = (struct lima_dump_chunk *)(dt + 1);
+       memset(chunk, 0, sizeof(*chunk));
+       chunk->id = LIMA_DUMP_CHUNK_FRAME;
+       chunk->size = pipe->frame_size;
+       memcpy(chunk + 1, task->frame, pipe->frame_size);
+       dt->num_chunks++;
+
+       chunk = (void *)(chunk + 1) + chunk->size;
+       memset(chunk, 0, sizeof(*chunk));
+       chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
+       chunk->size = sizeof(ctx->pname);
+       memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
+       dt->num_chunks++;
+
+       pid_chunk = (void *)(chunk + 1) + chunk->size;
+       memset(pid_chunk, 0, sizeof(*pid_chunk));
+       pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
+       pid_chunk->pid = ctx->pid;
+       dt->num_chunks++;
+
+       buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
+       for (i = 0; i < task->num_bos; i++) {
+               struct lima_bo *bo = task->bos[i];
+               void *data;
+
+               memset(buffer_chunk, 0, sizeof(*buffer_chunk));
+               buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
+               buffer_chunk->va = lima_vm_get_va(task->vm, bo);
+
+               if (bo->heap_size) {
+                       buffer_chunk->size = bo->heap_size;
+
+                       data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
+                                   VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+                       if (!data) {
+                               kvfree(et);
+                               goto out;
+                       }
+
+                       memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+                       vunmap(data);
+               } else {
+                       buffer_chunk->size = lima_bo_size(bo);
+
+                       data = drm_gem_shmem_vmap(&bo->base.base);
+                       if (IS_ERR_OR_NULL(data)) {
+                               kvfree(et);
+                               goto out;
+                       }
+
+                       memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+                       drm_gem_shmem_vunmap(&bo->base.base, data);
+               }
+
+               buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+               dt->num_chunks++;
+       }
+
+       list_add(&et->list, &dev->error_task_list);
+       dev->dump.size += et->size;
+       dev->dump.num_tasks++;
+
+       dev_info(dev->dev, "save error task state success\n");
+
+out:
+       mutex_unlock(&dev->error_task_list_lock);
+}
+
 static void lima_sched_timedout_job(struct drm_sched_job *job)
 {
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_sched_task *task = to_lima_task(job);
+       struct lima_device *ldev = pipe->ldev;
 
        if (!pipe->error)
                DRM_ERROR("lima job timeout\n");
@@ -268,6 +426,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
 
        drm_sched_increase_karma(&task->base);
 
+       lima_sched_build_error_task_list(task);
+
        pipe->task_error(pipe);
 
        if (pipe->bcast_mmu)
@@ -279,12 +439,12 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
                        lima_mmu_page_fault_resume(pipe->mmu[i]);
        }
 
-       if (pipe->current_vm)
-               lima_vm_put(pipe->current_vm);
-
+       lima_vm_put(pipe->current_vm);
        pipe->current_vm = NULL;
        pipe->current_task = NULL;
 
+       lima_pm_idle(ldev);
+
        drm_sched_resubmit_jobs(&pipe->base);
        drm_sched_start(&pipe->base, true);
 }
@@ -355,6 +515,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
 {
        struct lima_sched_task *task = pipe->current_task;
+       struct lima_device *ldev = pipe->ldev;
 
        if (pipe->error) {
                if (task && task->recoverable)
@@ -364,5 +525,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
        } else {
                pipe->task_fini(pipe);
                dma_fence_signal(task->fence);
+
+               lima_pm_idle(ldev);
        }
 }
index d64393fb50a9b667ab05eca7fb4ec3712daf5824..90f03c48ef4a89323cb0f06f0ab579f20b477ef2 100644 (file)
@@ -5,9 +5,18 @@
 #define __LIMA_SCHED_H__
 
 #include <drm/gpu_scheduler.h>
+#include <linux/list.h>
+#include <linux/xarray.h>
 
+struct lima_device;
 struct lima_vm;
 
+struct lima_sched_error_task {
+       struct list_head list;
+       void *data;
+       u32 size;
+};
+
 struct lima_sched_task {
        struct drm_sched_job base;
 
@@ -44,6 +53,8 @@ struct lima_sched_pipe {
        u32 fence_seqno;
        spinlock_t fence_lock;
 
+       struct lima_device *ldev;
+
        struct lima_sched_task *current_task;
        struct lima_vm *current_vm;
 
diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c
new file mode 100644 (file)
index 0000000..ea1c728
--- /dev/null
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#include "lima_sched.h"
+
+#define CREATE_TRACE_POINTS
+#include "lima_trace.h"
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
new file mode 100644 (file)
index 0000000..3a430e9
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#if !defined(_LIMA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _LIMA_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lima
+#define TRACE_INCLUDE_FILE lima_trace
+
+DECLARE_EVENT_CLASS(lima_task,
+       TP_PROTO(struct lima_sched_task *task),
+       TP_ARGS(task),
+       TP_STRUCT__entry(
+               __field(uint64_t, task_id)
+               __field(unsigned int, context)
+               __field(unsigned int, seqno)
+               __string(pipe, task->base.sched->name)
+               ),
+
+       TP_fast_assign(
+               __entry->task_id = task->base.id;
+               __entry->context = task->base.s_fence->finished.context;
+               __entry->seqno = task->base.s_fence->finished.seqno;
+               __assign_str(pipe, task->base.sched->name)
+               ),
+
+       TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
+                 __entry->task_id, __entry->context, __entry->seqno,
+                 __get_str(pipe))
+);
+
+DEFINE_EVENT(lima_task, lima_task_submit,
+            TP_PROTO(struct lima_sched_task *task),
+            TP_ARGS(task)
+);
+
+DEFINE_EVENT(lima_task, lima_task_run,
+            TP_PROTO(struct lima_sched_task *task),
+            TP_ARGS(task)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima
+#include <trace/define_trace.h>
index 22aeec77d84d3e1c0c11d51c3391d1019a64fffd..3a7c74822d8b2b0e584a3777792a0c860c535b21 100644 (file)
@@ -54,7 +54,8 @@ static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
 
 static inline void lima_vm_put(struct lima_vm *vm)
 {
-       kref_put(&vm->refcount, lima_vm_release);
+       if (vm)
+               kref_put(&vm->refcount, lima_vm_release);
 }
 
 void lima_vm_print(struct lima_vm *vm);
index e59907e688541a2ff5dfc6904e2a9980908d376f..04e1d38d41f79968f30e8a1298dd41211f871e25 100644 (file)
@@ -948,7 +948,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct drm_pending_vblank_event *event;
 
        drm_crtc_vblank_off(crtc);
@@ -1020,7 +1020,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct drm_pending_vblank_event *event = crtc->state->event;
        struct drm_plane *plane = &pipe->plane;
        struct drm_plane_state *pstate = plane->state;
@@ -1078,7 +1078,7 @@ static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        u32 val;
 
        /* Enable all VBLANK IRQs */
@@ -1097,7 +1097,7 @@ static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
 
        /* Disable all VBLANK IRQs */
        writel(0, mcde->regs + MCDE_IMSCPP);
@@ -1117,7 +1117,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
 
 int mcde_display_init(struct drm_device *drm)
 {
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        int ret;
        static const u32 formats[] = {
                DRM_FORMAT_ARGB8888,
index 80edd6628979c0d4d9224a6e145a2a5c7d6d97fe..679c2c4e6d9de63ca9aff17d9782ded83dd36712 100644 (file)
@@ -34,6 +34,8 @@ struct mcde {
        struct regulator *vana;
 };
 
+#define to_mcde(dev) container_of(dev, struct mcde, drm)
+
 bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
 void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
 extern struct platform_driver mcde_dsi_driver;
index f28cb7a576ba40f08c8fc448ff0e69c76d2bbd81..84f3e2dbd77bde90f94a5b394e482336685a093c 100644 (file)
@@ -72,6 +72,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_panel.h>
@@ -163,7 +164,7 @@ static irqreturn_t mcde_irq(int irq, void *data)
 static int mcde_modeset_init(struct drm_device *drm)
 {
        struct drm_mode_config *mode_config;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        int ret;
 
        if (!mcde->bridge) {
@@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm)
        ret = drm_vblank_init(drm, 1);
        if (ret) {
                dev_err(drm->dev, "failed to init vblank\n");
-               goto out_config;
+               return ret;
        }
 
        ret = mcde_display_init(drm);
        if (ret) {
                dev_err(drm->dev, "failed to init display\n");
-               goto out_config;
+               return ret;
        }
 
        /*
@@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm)
                                                    mcde->bridge);
        if (ret) {
                dev_err(drm->dev, "failed to attach display output bridge\n");
-               goto out_config;
+               return ret;
        }
 
        drm_mode_config_reset(drm);
@@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm)
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
-
-out_config:
-       drm_mode_config_cleanup(drm);
-       return ret;
-}
-
-static void mcde_release(struct drm_device *drm)
-{
-       struct mcde *mcde = drm->dev_private;
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(mcde);
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
@@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
 static struct drm_driver mcde_drm_driver = {
        .driver_features =
                DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
-       .release = mcde_release,
        .lastclose = drm_fb_helper_lastclose,
        .ioctls = NULL,
        .fops = &drm_fops,
@@ -259,7 +246,9 @@ static int mcde_drm_bind(struct device *dev)
        struct drm_device *drm = dev_get_drvdata(dev);
        int ret;
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
 
        ret = component_bind_all(drm->dev, drm);
        if (ret) {
@@ -318,35 +307,27 @@ static int mcde_probe(struct platform_device *pdev)
        int ret;
        int i;
 
-       mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
-       if (!mcde)
-               return -ENOMEM;
-       mcde->dev = dev;
-
-       ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
-       if (ret) {
-               kfree(mcde);
-               return ret;
-       }
+       mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
+       if (IS_ERR(mcde))
+               return PTR_ERR(mcde);
        drm = &mcde->drm;
-       drm->dev_private = mcde;
+       mcde->dev = dev;
        platform_set_drvdata(pdev, drm);
 
        /* Enable continuous updates: this is what Linux' framebuffer expects */
        mcde->oneshot_mode = false;
-       drm->dev_private = mcde;
 
        /* First obtain and turn on the main power */
        mcde->epod = devm_regulator_get(dev, "epod");
        if (IS_ERR(mcde->epod)) {
                ret = PTR_ERR(mcde->epod);
                dev_err(dev, "can't get EPOD regulator\n");
-               goto dev_unref;
+               return ret;
        }
        ret = regulator_enable(mcde->epod);
        if (ret) {
                dev_err(dev, "can't enable EPOD regulator\n");
-               goto dev_unref;
+               return ret;
        }
        mcde->vana = devm_regulator_get(dev, "vana");
        if (IS_ERR(mcde->vana)) {
@@ -497,8 +478,6 @@ regulator_off:
        regulator_disable(mcde->vana);
 regulator_epod_off:
        regulator_disable(mcde->epod);
-dev_unref:
-       drm_dev_put(drm);
        return ret;
 
 }
@@ -506,13 +485,12 @@ dev_unref:
 static int mcde_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
 
        component_master_del(&pdev->dev, &mcde_drm_comp_ops);
        clk_disable_unprepare(mcde->mcde_clk);
        regulator_disable(mcde->vana);
        regulator_disable(mcde->epod);
-       drm_dev_put(drm);
 
        return 0;
 }
index 7af5ebb0c43689776f13b37a2f41a5f6a972dc35..f303369305a3cda768b183034bffdeaf7cc61f5d 100644 (file)
@@ -1020,7 +1020,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
                         void *data)
 {
        struct drm_device *drm = data;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct mcde_dsi *d = dev_get_drvdata(dev);
        struct device_node *child;
        struct drm_panel *panel = NULL;
@@ -1073,10 +1073,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
                        panel = NULL;
 
                        bridge = of_drm_find_bridge(child);
-                       if (IS_ERR(bridge)) {
-                               dev_err(dev, "failed to find bridge (%ld)\n",
-                                       PTR_ERR(bridge));
-                               return PTR_ERR(bridge);
+                       if (!bridge) {
+                               dev_err(dev, "failed to find bridge\n");
+                               return -EINVAL;
                        }
                }
        }
index 4f0ce4cd5b8ca3d21b139a0f8550ec0401e2e338..52a3503edd8f15a7f1cf542b6abebbe40dc7219b 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "mtk_dpi_regs.h"
 #include "mtk_drm_ddp_comp.h"
@@ -509,15 +510,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        return 0;
 }
 
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
-       .destroy = mtk_dpi_encoder_destroy,
-};
-
 static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
@@ -596,8 +588,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                dev_err(dev, "Failed to initialize decoder: %d\n", ret);
                goto err_unregister;
index 0563c6813333ecf70fd6264065bf79cbae801b24..ce570283b55f7a140429d88e6b0e8f136ed6540f 100644 (file)
@@ -162,7 +162,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        }
        private->mutex_dev = &pdev->dev;
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
 
        drm->mode_config.min_width = 64;
        drm->mode_config.min_height = 64;
@@ -179,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 
        ret = component_bind_all(drm->dev, drm);
        if (ret)
-               goto err_config_cleanup;
+               return ret;
 
        /*
         * We currently support two fixed data streams, each optional,
@@ -255,8 +257,6 @@ err_unset_dma_parms:
                dma_dev->dma_parms = NULL;
 err_component_unbind:
        component_unbind_all(drm->dev, drm);
-err_config_cleanup:
-       drm_mode_config_cleanup(drm);
 
        return ret;
 }
@@ -272,7 +272,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
                private->dma_dev->dma_parms = NULL;
 
        component_unbind_all(drm->dev, drm);
-       drm_mode_config_cleanup(drm);
 }
 
 static const struct file_operations mtk_drm_fops = {
@@ -348,9 +347,7 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
-       ret = drm_fbdev_generic_setup(drm, 32);
-       if (ret)
-               DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
index 0ede69830a9dd0c170de57c263f9b89b722fb1f1..a9a25087112fd4fa9813748c27f19ebad32b4967 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "mtk_drm_ddp_comp.h"
 
@@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
        dsi->enabled = false;
 }
 
-static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
-       .destroy = mtk_dsi_encoder_destroy,
-};
-
 static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
@@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
 {
        int ret;
 
-       ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("Failed to encoder init to drm\n");
                return ret;
index b5f5eb7b4bb904b311f5994750e5830026a8ae1b..621f6de0f076a6176ffd5f9e929e971b59a1d9ff 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
+#include <linux/sys_soc.h>
 #include <linux/platform_device.h>
 #include <linux/soc/amlogic/meson-canvas.h>
 
@@ -183,6 +184,24 @@ static void meson_remove_framebuffers(void)
        kfree(ap);
 }
 
+struct meson_drm_soc_attr {
+       struct meson_drm_soc_limits limits;
+       const struct soc_device_attribute *attrs;
+};
+
+static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+       /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */
+       {
+               .limits = {
+                       .max_hdmi_phy_freq = 1650000,
+               },
+               .attrs = (const struct soc_device_attribute []) {
+                       { .soc_id = "GXL (S805*)", },
+                       { /* sentinel */ },
+               }
+       },
+};
+
 static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -191,7 +210,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        struct drm_device *drm;
        struct resource *res;
        void __iomem *regs;
-       int ret;
+       int ret, i;
 
        /* Checks if an output connector is available */
        if (!meson_vpu_has_available_connectors(dev)) {
@@ -281,10 +300,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        if (ret)
                goto free_drm;
 
+       /* Assign limits per soc revision/package */
+       for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+               if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
+                       priv->limits = &meson_drm_soc_attrs[i].limits;
+                       break;
+               }
+       }
+
        /* Remove early framebuffers (ie. simplefb) */
        meson_remove_framebuffers();
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               goto free_drm;
        drm->mode_config.max_width = 3840;
        drm->mode_config.max_height = 2160;
        drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -379,7 +408,6 @@ static void meson_drv_unbind(struct device *dev)
        drm_dev_unregister(drm);
        drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
-       drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
 }
 
index 04fdf3826643276eff31c67d2212c8d1202d561c..5b23704a80d680c0530e9a5bdfbcbfdce98b1a97 100644 (file)
@@ -30,6 +30,10 @@ struct meson_drm_match_data {
        struct meson_afbcd_ops *afbcd_ops;
 };
 
+struct meson_drm_soc_limits {
+       unsigned int max_hdmi_phy_freq;
+};
+
 struct meson_drm {
        struct device *dev;
        enum vpu_compatible compat;
@@ -48,6 +52,8 @@ struct meson_drm {
        struct drm_plane *primary_plane;
        struct drm_plane *overlay_plane;
 
+       const struct meson_drm_soc_limits *limits;
+
        /* Components Data */
        struct {
                bool osd1_enabled;
index e8c94915a4fcf3936f7a18c7fc9bdabcaa8e6016..5be963e9db05922582b3860fad5b02a11c42c721 100644 (file)
@@ -695,7 +695,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
                __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
 
-       return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
+       return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
 }
 
 /* Encoder */
index d5cbc47835bfa021252383e0ecacaf30dd66a279..35338ed1820996383ee1ef8afa717b004aa4e3f9 100644 (file)
@@ -223,7 +223,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
                        priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
                                                OSD_COLOR_MATRIX_16_RGB565;
                        break;
-               };
+               }
        }
 
        switch (fb->format->format) {
index fdf26dac9fa8ed8f7c0f9b56e83b48b790f5bc1e..0eb86943a35889e3afc9f786814cc89e07f4dcd5 100644 (file)
@@ -725,6 +725,13 @@ meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
        /* In DMT mode, path after PLL is always /10 */
        freq *= 10;
 
+       /* Check against soc revision/package limits */
+       if (priv->limits) {
+               if (priv->limits->max_hdmi_phy_freq &&
+                   freq > priv->limits->max_hdmi_phy_freq)
+                       return MODE_CLOCK_HIGH;
+       }
+
        if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
                return MODE_OK;
 
@@ -762,7 +769,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
 }
 
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq,
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
                              unsigned int vclk_freq)
 {
        int i;
@@ -770,6 +777,13 @@ meson_vclk_vic_supported_freq(unsigned int phy_freq,
        DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
                         phy_freq, vclk_freq);
 
+       /* Check against soc revision/package limits */
+       if (priv->limits) {
+               if (priv->limits->max_hdmi_phy_freq &&
+                   phy_freq > priv->limits->max_hdmi_phy_freq)
+                       return MODE_CLOCK_HIGH;
+       }
+
        for (i = 0 ; params[i].pixel_freq ; ++i) {
                DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
                                 i, params[i].pixel_freq,
index aed0ab2efa71dbf349cc1c365c27542666ecb754..60617aaf18dd1cedfab315cecb7d435e0d121e41 100644 (file)
@@ -25,7 +25,8 @@ enum {
 enum drm_mode_status
 meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+                             unsigned int vclk_freq);
 
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
                      unsigned int phy_freq, unsigned int vclk_freq,
index d491edd317ff31c6ab97baecf47d253f4e4c4237..aebc9ce43d551a82716f1aeba1dc36d5e1b3eac1 100644 (file)
@@ -260,7 +260,7 @@ int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                            uint32_t handle, uint32_t width, uint32_t height)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct drm_gem_object *obj;
        struct drm_gem_vram_object *gbo = NULL;
        int ret;
@@ -307,7 +307,7 @@ err_drm_gem_object_put_unlocked:
 
 int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
-       struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
 
        /* Our origin is at (64,64) */
        x += 64;
index 7a5bad2f57d70bbe838a46c8e29f800e4466f313..c2f0e4b40b0527fd4dccf85a22fa89008c3f2211 100644 (file)
@@ -77,6 +77,8 @@ static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_mgag200_driver_unload;
 
+       drm_fbdev_generic_setup(dev, 0);
+
        return 0;
 
 err_mgag200_driver_unload:
@@ -118,7 +120,7 @@ int mgag200_driver_dumb_create(struct drm_file *file,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        unsigned long pg_align;
 
        if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
index 9691252d6233ff965e32ada40415053f61f5b74b..d9b7e96b214f8a0fc604b6dc5abdda1b34092e4d 100644 (file)
@@ -104,11 +104,6 @@ struct mga_crtc {
        bool enabled;
 };
 
-struct mga_mode_info {
-       bool mode_config_initialized;
-       struct mga_crtc *crtc;
-};
-
 struct mga_i2c_chan {
        struct i2c_adapter adapter;
        struct drm_device *dev;
@@ -160,17 +155,14 @@ struct mga_device {
        void __iomem                    *rmmio;
 
        struct mga_mc                   mc;
-       struct mga_mode_info            mode_info;
 
        struct mga_cursor cursor;
 
        size_t vram_fb_available;
 
        bool                            suspended;
-       int                             num_crtc;
        enum mga_type                   type;
        int                             has_sdram;
-       struct drm_display_mode         mode;
 
        int bpp_shifts[4];
 
@@ -179,9 +171,15 @@ struct mga_device {
        /* SE model number stored in reg 0x1e24 */
        u32 unique_rev_id;
 
+       struct mga_connector connector;
        struct drm_encoder encoder;
 };
 
+static inline struct mga_device *to_mga_device(struct drm_device *dev)
+{
+       return dev->dev_private;
+}
+
 static inline enum mga_type
 mgag200_type_from_driver_data(kernel_ulong_t driver_data)
 {
@@ -196,7 +194,6 @@ mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
 
                                /* mgag200_mode.c */
 int mgag200_modeset_init(struct mga_device *mdev);
-void mgag200_modeset_fini(struct mga_device *mdev);
 
                                /* mgag200_main.c */
 int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
index 9f4635916d32252833af5664e02fdf7716a97d32..09731e614e46d72b487ed2aca62b3509a74c5f40 100644 (file)
@@ -61,34 +61,34 @@ static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
 static void mga_gpio_setsda(void *data, int state)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        mga_i2c_set(mdev, i2c->data, state);
 }
 
 static void mga_gpio_setscl(void *data, int state)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        mga_i2c_set(mdev, i2c->clock, state);
 }
 
 static int mga_gpio_getsda(void *data)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
 }
 
 static int mga_gpio_getscl(void *data)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
 }
 
 struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct mga_i2c_chan *i2c;
        int ret;
        int data, clock;
index e278b6a547bde6fb8e3b8e3b5f7b93f9e8348d56..86df799fd38c51c3fc406e9690f7520536eb83fc 100644 (file)
 
 #include <linux/pci.h>
 
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
 #include "mgag200_drv.h"
 
-static const struct drm_mode_config_funcs mga_mode_funcs = {
-       .fb_create = drm_gem_fb_create
-};
-
 static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 {
        int offset;
@@ -66,51 +59,54 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 /* Map the framebuffer from the card and configure the core */
 static int mga_vram_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        void __iomem *mem;
 
        /* BAR 0 is VRAM */
-       mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
-       mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+       mdev->mc.vram_base = pci_resource_start(dev->pdev, 0);
+       mdev->mc.vram_window = pci_resource_len(dev->pdev, 0);
 
-       if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
-                               "mgadrmfb_vram")) {
+       if (!devm_request_mem_region(dev->dev, mdev->mc.vram_base,
+                                    mdev->mc.vram_window, "mgadrmfb_vram")) {
                DRM_ERROR("can't reserve VRAM\n");
                return -ENXIO;
        }
 
-       mem = pci_iomap(mdev->dev->pdev, 0, 0);
+       mem = pci_iomap(dev->pdev, 0, 0);
        if (!mem)
                return -ENOMEM;
 
        mdev->mc.vram_size = mga_probe_vram(mdev, mem);
 
-       pci_iounmap(mdev->dev->pdev, mem);
+       pci_iounmap(dev->pdev, mem);
 
        return 0;
 }
 
-static int mgag200_device_init(struct drm_device *dev,
-                              uint32_t flags)
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev;
        int ret, option;
 
+       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+       if (mdev == NULL)
+               return -ENOMEM;
+       dev->dev_private = (void *)mdev;
+       mdev->dev = dev;
+
        mdev->flags = mgag200_flags_from_driver_data(flags);
        mdev->type = mgag200_type_from_driver_data(flags);
 
-       /* Hardcode the number of CRTCs to 1 */
-       mdev->num_crtc = 1;
-
        pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
        mdev->has_sdram = !(option & (1 << 14));
 
        /* BAR 0 is the framebuffer, BAR 1 contains registers */
-       mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
-       mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+       mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
+       mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
 
-       if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
-                               "mgadrmfb_mmio")) {
-               DRM_ERROR("can't reserve mmio registers\n");
+       if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
+                                    mdev->rmmio_size, "mgadrmfb_mmio")) {
+               drm_err(dev, "can't reserve mmio registers\n");
                return -ENOMEM;
        }
 
@@ -121,90 +117,43 @@ static int mgag200_device_init(struct drm_device *dev,
        /* stash G200 SE model number for later use */
        if (IS_G200_SE(mdev)) {
                mdev->unique_rev_id = RREG32(0x1e24);
-               DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
-                         mdev->unique_rev_id);
+               drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+                       mdev->unique_rev_id);
        }
 
        ret = mga_vram_init(mdev);
        if (ret)
                return ret;
 
-       mdev->bpp_shifts[0] = 0;
-       mdev->bpp_shifts[1] = 1;
-       mdev->bpp_shifts[2] = 0;
-       mdev->bpp_shifts[3] = 2;
-       return 0;
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
-{
-       struct mga_device *mdev;
-       int r;
-
-       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
-       if (mdev == NULL)
-               return -ENOMEM;
-       dev->dev_private = (void *)mdev;
-       mdev->dev = dev;
-
-       r = mgag200_device_init(dev, flags);
-       if (r) {
-               dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
-               return r;
-       }
-       r = mgag200_mm_init(mdev);
-       if (r)
+       ret = mgag200_mm_init(mdev);
+       if (ret)
                goto err_mm;
 
-       drm_mode_config_init(dev);
-       dev->mode_config.funcs = (void *)&mga_mode_funcs;
-       if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
-               dev->mode_config.preferred_depth = 16;
-       else
-               dev->mode_config.preferred_depth = 32;
-       dev->mode_config.prefer_shadow = 1;
-
-       r = mgag200_modeset_init(mdev);
-       if (r) {
-               dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
-               goto err_modeset;
+       ret = mgag200_modeset_init(mdev);
+       if (ret) {
+               drm_err(dev, "Fatal error during modeset init: %d\n", ret);
+               goto err_mgag200_mm_fini;
        }
 
-       r = mgag200_cursor_init(mdev);
-       if (r)
-               dev_warn(&dev->pdev->dev,
-                       "Could not initialize cursors. Not doing hardware cursors.\n");
-
-       r = drm_fbdev_generic_setup(mdev->dev, 0);
-       if (r)
-               goto err_modeset;
+       ret = mgag200_cursor_init(mdev);
+       if (ret)
+               drm_err(dev, "Could not initialize cursors. Not doing hardware cursors.\n");
 
        return 0;
 
-err_modeset:
-       drm_mode_config_cleanup(dev);
-       mgag200_cursor_fini(mdev);
+err_mgag200_mm_fini:
        mgag200_mm_fini(mdev);
 err_mm:
        dev->dev_private = NULL;
-
-       return r;
+       return ret;
 }
 
 void mgag200_driver_unload(struct drm_device *dev)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
 
        if (mdev == NULL)
                return;
-       mgag200_modeset_fini(mdev);
-       drm_mode_config_cleanup(dev);
        mgag200_cursor_fini(mdev);
        mgag200_mm_fini(mdev);
        dev->dev_private = NULL;
index d90e83959fca17616474917257ec1fdd0923cd2d..5f4ac36a97760398cd32e1565797e17ca6b3247f 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -28,7 +29,7 @@
 static void mga_crtc_load_lut(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct drm_framebuffer *fb = crtc->primary->fb;
        u16 *r_ptr, *g_ptr, *b_ptr;
        int i;
@@ -728,7 +729,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
 
 static void mga_g200wb_prepare(struct drm_crtc *crtc)
 {
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
        u8 tmp;
        int iter_max;
 
@@ -783,7 +784,7 @@ static void mga_g200wb_prepare(struct drm_crtc *crtc)
 static void mga_g200wb_commit(struct drm_crtc *crtc)
 {
        u8 tmp;
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
 
        /* 1- The first step is to ensure that the vrsten and hrsten are set */
        WREG8(MGAREG_CRTCEXT_INDEX, 1);
@@ -833,7 +834,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
  */
 static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
        u32 addr;
        int count;
        u8 crtcext0;
@@ -902,7 +903,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                                int x, int y, struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        const struct drm_framebuffer *fb = crtc->primary->fb;
        int hdisplay, hsyncstart, hsyncend, htotal;
        int vdisplay, vsyncstart, vsyncend, vtotal;
@@ -1135,9 +1136,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
        WREG8(MGA_MISC_OUT, misc);
 
-       if (adjusted_mode)
-               memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
-
        mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
 
        /* reset tagfifo */
@@ -1263,7 +1261,7 @@ static int mga_resume(struct drm_crtc *crtc)
 static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        u8 seq1 = 0, crtcext1 = 0;
 
        switch (mode) {
@@ -1317,7 +1315,7 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
 static void mga_crtc_prepare(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        u8 tmp;
 
        /*      mga_resume(crtc);*/
@@ -1353,7 +1351,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
 static void mga_crtc_commit(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        u8 tmp;
 
@@ -1433,6 +1431,7 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
 /* CRTC setup */
 static void mga_crtc_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        struct mga_crtc *mga_crtc;
 
        mga_crtc = kzalloc(sizeof(struct mga_crtc) +
@@ -1442,14 +1441,17 @@ static void mga_crtc_init(struct mga_device *mdev)
        if (mga_crtc == NULL)
                return;
 
-       drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+       drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
-       mdev->mode_info.crtc = mga_crtc;
 
        drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
 }
 
+/*
+ * Connector
+ */
+
 static int mga_vga_get_modes(struct drm_connector *connector)
 {
        struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1495,7 +1497,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct drm_device *dev = connector->dev;
-       struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        int bpp = 32;
 
        if (IS_G200_SE(mdev)) {
@@ -1574,7 +1576,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
        struct mga_connector *mga_connector = to_mga_connector(connector);
        mgag200_i2c_destroy(mga_connector->i2c);
        drm_connector_cleanup(connector);
-       kfree(connector);
 }
 
 static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
@@ -1588,70 +1589,96 @@ static const struct drm_connector_funcs mga_vga_connector_funcs = {
        .destroy = mga_connector_destroy,
 };
 
-static struct drm_connector *mga_vga_init(struct drm_device *dev)
+static int mgag200_vga_connector_init(struct mga_device *mdev)
 {
-       struct drm_connector *connector;
-       struct mga_connector *mga_connector;
-
-       mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
-       if (!mga_connector)
-               return NULL;
-
-       connector = &mga_connector->base;
-       mga_connector->i2c = mgag200_i2c_create(dev);
-       if (!mga_connector->i2c)
-               DRM_ERROR("failed to add ddc bus\n");
+       struct drm_device *dev = mdev->dev;
+       struct mga_connector *mconnector = &mdev->connector;
+       struct drm_connector *connector = &mconnector->base;
+       struct mga_i2c_chan *i2c;
+       int ret;
 
-       drm_connector_init_with_ddc(dev, connector,
-                                   &mga_vga_connector_funcs,
-                                   DRM_MODE_CONNECTOR_VGA,
-                                   &mga_connector->i2c->adapter);
+       i2c = mgag200_i2c_create(dev);
+       if (!i2c)
+               drm_warn(dev, "failed to add DDC bus\n");
 
+       ret = drm_connector_init_with_ddc(dev, connector,
+                                         &mga_vga_connector_funcs,
+                                         DRM_MODE_CONNECTOR_VGA,
+                                         &i2c->adapter);
+       if (ret)
+               goto err_mgag200_i2c_destroy;
        drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
 
-       drm_connector_register(connector);
+       mconnector->i2c = i2c;
 
-       return connector;
+       return 0;
+
+err_mgag200_i2c_destroy:
+       mgag200_i2c_destroy(i2c);
+       return ret;
 }
 
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+       .fb_create = drm_gem_fb_create
+};
+
+static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+{
+       if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
+               return 16;
+       else
+               return 32;
+}
 
 int mgag200_modeset_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        struct drm_encoder *encoder = &mdev->encoder;
-       struct drm_connector *connector;
+       struct drm_connector *connector = &mdev->connector.base;
        int ret;
 
-       mdev->mode_info.mode_config_initialized = true;
+       mdev->bpp_shifts[0] = 0;
+       mdev->bpp_shifts[1] = 1;
+       mdev->bpp_shifts[2] = 0;
+       mdev->bpp_shifts[3] = 2;
+
+       ret = drmm_mode_config_init(dev);
+       if (ret) {
+               drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
+                       ret);
+               return ret;
+       }
+
+       dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+       dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
 
-       mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
-       mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+       dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
+       dev->mode_config.prefer_shadow = 1;
 
-       mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+       dev->mode_config.fb_base = mdev->mc.vram_base;
+
+       dev->mode_config.funcs = &mgag200_mode_config_funcs;
 
        mga_crtc_init(mdev);
 
-       ret = drm_simple_encoder_init(mdev->dev, encoder,
-                                     DRM_MODE_ENCODER_DAC);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
        if (ret) {
-               drm_err(mdev->dev,
+               drm_err(dev,
                        "drm_simple_encoder_init() failed, error %d\n",
                        ret);
                return ret;
        }
        encoder->possible_crtcs = 0x1;
 
-       connector = mga_vga_init(mdev->dev);
-       if (!connector) {
-               DRM_ERROR("mga_vga_init failed\n");
-               return -1;
+       ret = mgag200_vga_connector_init(mdev);
+       if (ret) {
+               drm_err(dev,
+                       "mgag200_vga_connector_init() failed, error %d\n",
+                       ret);
+               return ret;
        }
 
        drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
-
-void mgag200_modeset_fini(struct mga_device *mdev)
-{
-
-}
index 075ecce4b5e06a22396c9db56ee13992dcd33ce2..8cae2ca4af6ba4e22a504771f2139b5628245e84 100644 (file)
@@ -148,27 +148,19 @@ reset_set(void *data, u64 val)
 DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
 
 
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
 {
        struct drm_device *dev;
-       int ret;
 
        if (!minor)
-               return 0;
+               return;
 
        dev = minor->dev;
 
-       ret = drm_debugfs_create_files(a5xx_debugfs_list,
-                       ARRAY_SIZE(a5xx_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(a5xx_debugfs_list,
+                                ARRAY_SIZE(a5xx_debugfs_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
                            &reset_fops);
-
-       return 0;
 }
index 833468ce6b6d7bb811f17e291c25b006bde45555..54868d4e3958f318dc67dd48506c04a41969a49e 100644 (file)
@@ -41,7 +41,7 @@ struct a5xx_gpu {
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
 
 #ifdef CONFIG_DEBUG_FS
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
 #endif
 
 /*
index 47b989834af166410c10921b0ed4f0980c42a2b5..c902c6503675f9e9cebd3e2d536e0352f146b685 100644 (file)
@@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = {
 
 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(mdp5_debugfs_list,
-                       ARRAY_SIZE(mdp5_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(mdp5_debugfs_list,
+                                ARRAY_SIZE(mdp5_debugfs_list),
+                                minor->debugfs_root, minor);
 
        return 0;
 }
index 1c74381a4fc9d3eaacc4ffa6f900160db8f3096d..ee2e270f464c1d7b664abff78f9aea390e258d2a 100644 (file)
@@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev)
        return ret;
 }
 
-int msm_debugfs_init(struct drm_minor *minor)
+void msm_debugfs_init(struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
        struct msm_drm_private *priv = dev->dev_private;
-       int ret;
-
-       ret = drm_debugfs_create_files(msm_debugfs_list,
-                       ARRAY_SIZE(msm_debugfs_list),
-                       minor->debugfs_root, minor);
 
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(msm_debugfs_list,
+                                ARRAY_SIZE(msm_debugfs_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
                dev, &msm_gpu_fops);
 
-       if (priv->kms && priv->kms->funcs->debugfs_init) {
-               ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
-               if (ret)
-                       return ret;
-       }
-
-       return ret;
+       if (priv->kms && priv->kms->funcs->debugfs_init)
+               priv->kms->funcs->debugfs_init(priv->kms, minor);
 }
 #endif
 
index 2b91f8c178ad37d46ac81fb7039f4c6e85588af3..ef58f66abbb341eccfbfeff9d759141e30ccc937 100644 (file)
@@ -8,7 +8,7 @@
 #define __MSM_DEBUGFS_H__
 
 #ifdef CONFIG_DEBUG_FS
-int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_init(struct drm_minor *minor);
 #endif
 
 #endif /* __MSM_DEBUGFS_H__ */
index be5bc2e8425c579de32cd5015e3cc71bf005c1a0..6ccae4ba905cc8c9407a32b742d5573801c8578a 100644 (file)
@@ -57,7 +57,7 @@ struct msm_gpu_funcs {
        void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
                        struct drm_printer *p);
        /* for generation specific debugfs: */
-       int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+       void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
 #endif
        unsigned long (*gpu_busy)(struct msm_gpu *gpu);
        struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
index 15a3d40edf0292ca16797a482b0b16d73573ff21..63cb5e432f8ab19dcbcb3b7f88ee83ae913f4173 100644 (file)
@@ -217,7 +217,7 @@ static const struct nouveau_debugfs_files {
        {"pstate", &nouveau_pstate_fops},
 };
 
-int
+void
 nouveau_drm_debugfs_init(struct drm_minor *minor)
 {
        struct nouveau_drm *drm = nouveau_drm(minor->dev);
@@ -240,12 +240,10 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
         */
        dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
        if (!dentry)
-               return 0;
+               return;
 
        d_inode(dentry)->i_size = drm->vbios.length;
        dput(dentry);
-
-       return 0;
 }
 
 int
index 8909c010e8eaebfe4fe4eb08737a61e01e98be11..77f0323b38ba8cb7b20822a3b98866509e60c46f 100644 (file)
@@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev)
        return nouveau_drm(dev)->debugfs;
 }
 
-extern int  nouveau_drm_debugfs_init(struct drm_minor *);
+extern void  nouveau_drm_debugfs_init(struct drm_minor *);
 extern int  nouveau_debugfs_init(struct nouveau_drm *);
 extern void nouveau_debugfs_fini(struct nouveau_drm *);
 #else
-static inline int
+static inline void
 nouveau_drm_debugfs_init(struct drm_minor *minor)
-{
-       return 0;
-}
+{}
 
 static inline int
 nouveau_debugfs_init(struct nouveau_drm *drm)
index 232a9d7c51e5d20d02277441cee77c4ecfb328a1..e770c9497871bc09086f2b4a7f5ddefe688c2327 100644 (file)
@@ -25,6 +25,9 @@
 MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
 
 static const struct nvkm_sec2_fwif
 gp108_sec2_fwif[] = {
index b6ebd95c9ba1ee907a526a472d0ce3700b0c8535..a8295653ceab17303e27a8724bf33b07367f5482 100644 (file)
@@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
        return 0;
 }
 
+MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
+
 static const struct nvkm_sec2_fwif
 tu102_sec2_fwif[] = {
        {  0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
index 03b355dabab3dc64507e3102259b1fd6451972ba..abf3eda683f0c6a274751a1783cc2d9a9758deaf 100644 (file)
@@ -36,8 +36,8 @@ probe_monitoring_device(struct nvkm_i2c_bus *bus,
 
        request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-       client = i2c_new_device(&bus->i2c, info);
-       if (!client)
+       client = i2c_new_client_device(&bus->i2c, info);
+       if (IS_ERR(client))
                return false;
 
        if (!client->dev.driver ||
index dbb90f2d2ccde5973db67530924c358059cec747..6639ee9b05d3d611b8d055af724f057237680e52 100644 (file)
@@ -3137,33 +3137,12 @@ static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
        dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
        dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
 
-       if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
-               vs = false;
-       else
-               vs = true;
-
-       if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
-               hs = false;
-       else
-               hs = true;
-
-       if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
-               de = false;
-       else
-               de = true;
-
-       if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
-               ipc = false;
-       else
-               ipc = true;
-
-       /* always use the 'rf' setting */
-       onoff = true;
-
-       if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
-               rf = true;
-       else
-               rf = false;
+       vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+       hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+       de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
+       ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
+       onoff = true; /* always use the 'rf' setting */
+       rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
 
        l = FLD_VAL(onoff, 17, 17) |
                FLD_VAL(rf, 16, 16) |
index 766553bb2f87b7b7e36cc107396980d10e67be92..9701843ccf09d946c091156f0611811410c465d2 100644 (file)
@@ -208,49 +208,6 @@ static const struct venc_config venc_config_ntsc_trm = {
        .gen_ctrl                               = 0x00F90000,
 };
 
-static const struct venc_config venc_config_pal_bdghi = {
-       .f_control                              = 0,
-       .vidout_ctrl                            = 0,
-       .sync_ctrl                              = 0,
-       .hfltr_ctrl                             = 0,
-       .x_color                                = 0,
-       .line21                                 = 0,
-       .ln_sel                                 = 21,
-       .htrigger_vtrigger                      = 0,
-       .tvdetgp_int_start_stop_x               = 0x00140001,
-       .tvdetgp_int_start_stop_y               = 0x00010001,
-       .gen_ctrl                               = 0x00FB0000,
-
-       .llen                                   = 864-1,
-       .flens                                  = 625-1,
-       .cc_carr_wss_carr                       = 0x2F7625ED,
-       .c_phase                                = 0xDF,
-       .gain_u                                 = 0x111,
-       .gain_v                                 = 0x181,
-       .gain_y                                 = 0x140,
-       .black_level                            = 0x3e,
-       .blank_level                            = 0x3e,
-       .m_control                              = 0<<2 | 1<<1,
-       .bstamp_wss_data                        = 0x42,
-       .s_carr                                 = 0x2a098acb,
-       .l21__wc_ctl                            = 0<<13 | 0x16<<8 | 0<<0,
-       .savid__eavid                           = 0x06A70108,
-       .flen__fal                              = 23<<16 | 624<<0,
-       .lal__phase_reset                       = 2<<17 | 310<<0,
-       .hs_int_start_stop_x                    = 0x00920358,
-       .hs_ext_start_stop_x                    = 0x000F035F,
-       .vs_int_start_x                         = 0x1a7<<16,
-       .vs_int_stop_x__vs_int_start_y          = 0x000601A7,
-       .vs_int_stop_y__vs_ext_start_x          = 0x01AF0036,
-       .vs_ext_stop_x__vs_ext_start_y          = 0x27101af,
-       .vs_ext_stop_y                          = 0x05,
-       .avid_start_stop_x                      = 0x03530082,
-       .avid_start_stop_y                      = 0x0270002E,
-       .fid_int_start_x__fid_int_start_y       = 0x0005008A,
-       .fid_int_offset_y__fid_ext_start_x      = 0x002E0138,
-       .fid_ext_start_y__fid_ext_offset_y      = 0x01380005,
-};
-
 enum venc_videomode {
        VENC_MODE_UNKNOWN,
        VENC_MODE_PAL,
index 34dfb33145b49f047a38adf9e9626d2ad6c52be4..b57fbe8a0ac22e603849e156021f466be935172f 100644 (file)
@@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = {
        {"tiler_map", tiler_map_show, 0},
 };
 
-int omap_debugfs_init(struct drm_minor *minor)
+void omap_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(omap_debugfs_list,
-                       ARRAY_SIZE(omap_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install omap_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(omap_debugfs_list,
+                                ARRAY_SIZE(omap_debugfs_list),
+                                minor->debugfs_root, minor);
 
        if (dmm_is_available())
-               ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
-                               ARRAY_SIZE(omap_dmm_debugfs_list),
-                               minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
+               drm_debugfs_create_files(omap_dmm_debugfs_list,
+                                        ARRAY_SIZE(omap_dmm_debugfs_list),
+                                        minor->debugfs_root, minor);
 }
 
 #endif
index 7c4b66efcaa707ab3b1d2ae5d8e3d026cb1db7ca..8a1fac680138ef4d736ff859480262e3869a864a 100644 (file)
@@ -82,6 +82,6 @@ struct omap_drm_private {
 };
 
 
-int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_init(struct drm_minor *minor);
 
 #endif /* __OMAPDRM_DRV_H__ */
index a1723c1b5fbf83054d477c6f2260159dd23b8b56..39055c1f0e2f34f2c9b559a0968bd5ea1da937a6 100644 (file)
@@ -18,6 +18,16 @@ config DRM_PANEL_ARM_VERSATILE
          reference designs. The panel is detected using special registers
          in the Versatile family syscon registers.
 
+config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
+       tristate "ASUS Z00T TM5P5 NT35596 panel"
+       depends on GPIOLIB && OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the ASUS TMP5P5
+         NT35596 1080x1920 video mode panel as found in some Asus
+         Zenfone 2 Laser Z00T devices.
+
 config DRM_PANEL_BOE_HIMAX8279D
        tristate "Boe Himax8279d panel"
        depends on OF
@@ -137,6 +147,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
          24 bit RGB per pixel. It provides a MIPI DSI interface to
          the host and has a built-in LED backlight.
 
+config DRM_PANEL_LEADTEK_LTK050H3146W
+       tristate "Leadtek LTK050H3146W panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for Leadtek LTK050H3146W
+         TFT-LCD modules. The panel has a 720x1280 resolution and uses
+         24 bit RGB per pixel. It provides a MIPI DSI interface to
+         the host and has a built-in LED backlight.
+
 config DRM_PANEL_LEADTEK_LTK500HD1829
        tristate "Leadtek LTK500HD1829 panel"
        depends on OF
@@ -433,6 +454,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
          Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
          Video Mode panel
 
+config DRM_PANEL_VISIONOX_RM69299
+       tristate "Visionox RM69299"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       help
+         Say Y here if you want to enable support for Visionox
+         RM69299  DSI Video Mode panel.
+
 config DRM_PANEL_XINPENG_XPP055C272
        tristate "Xinpeng XPP055C272 panel driver"
        depends on OF
index 96a883cd66305c3b1e459684130d25a68d8eed0a..de74f282c4330048b668e42af1915a32e2e66766 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
 obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
 obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
@@ -46,4 +48,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
 obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
new file mode 100644 (file)
index 0000000..39e0f03
--- /dev/null
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tm5p5_nt35596 {
+       struct drm_panel panel;
+       struct mipi_dsi_device *dsi;
+       struct regulator_bulk_data supplies[2];
+       struct gpio_desc *reset_gpio;
+       bool prepared;
+};
+
+static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
+{
+       return container_of(panel, struct tm5p5_nt35596, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do {                                \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d));    \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do {                            \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
+{
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(1000, 2000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       usleep_range(1000, 2000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(15000, 16000);
+}
+
+static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
+{
+       struct mipi_dsi_device *dsi = ctx->dsi;
+
+       dsi_generic_write_seq(dsi, 0xff, 0x05);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0xc5, 0x31);
+       dsi_generic_write_seq(dsi, 0xff, 0x04);
+       dsi_generic_write_seq(dsi, 0x01, 0x84);
+       dsi_generic_write_seq(dsi, 0x05, 0x25);
+       dsi_generic_write_seq(dsi, 0x06, 0x01);
+       dsi_generic_write_seq(dsi, 0x07, 0x20);
+       dsi_generic_write_seq(dsi, 0x08, 0x06);
+       dsi_generic_write_seq(dsi, 0x09, 0x08);
+       dsi_generic_write_seq(dsi, 0x0a, 0x10);
+       dsi_generic_write_seq(dsi, 0x0b, 0x10);
+       dsi_generic_write_seq(dsi, 0x0c, 0x10);
+       dsi_generic_write_seq(dsi, 0x0d, 0x14);
+       dsi_generic_write_seq(dsi, 0x0e, 0x14);
+       dsi_generic_write_seq(dsi, 0x0f, 0x14);
+       dsi_generic_write_seq(dsi, 0x10, 0x14);
+       dsi_generic_write_seq(dsi, 0x11, 0x14);
+       dsi_generic_write_seq(dsi, 0x12, 0x14);
+       dsi_generic_write_seq(dsi, 0x17, 0xf3);
+       dsi_generic_write_seq(dsi, 0x18, 0xc0);
+       dsi_generic_write_seq(dsi, 0x19, 0xc0);
+       dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+       dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+       dsi_generic_write_seq(dsi, 0x20, 0xb3);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0xff, 0x00);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0x35, 0x01);
+       dsi_generic_write_seq(dsi, 0xd3, 0x06);
+       dsi_generic_write_seq(dsi, 0xd4, 0x04);
+       dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+       dsi_generic_write_seq(dsi, 0x11, 0x00);
+       msleep(100);
+       dsi_generic_write_seq(dsi, 0x29, 0x00);
+       dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+       return 0;
+}
+
+static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
+{
+       struct mipi_dsi_device *dsi = ctx->dsi;
+       struct device *dev = &dsi->dev;
+       int ret;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to set display off: %d\n", ret);
+               return ret;
+       }
+       msleep(60);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+               return ret;
+       }
+
+       dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+
+       return 0;
+}
+
+static int tm5p5_nt35596_prepare(struct drm_panel *panel)
+{
+       struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+       struct device *dev = &ctx->dsi->dev;
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enable regulators: %d\n", ret);
+               return ret;
+       }
+
+       tm5p5_nt35596_reset(ctx);
+
+       ret = tm5p5_nt35596_on(ctx);
+       if (ret < 0) {
+               dev_err(dev, "Failed to initialize panel: %d\n", ret);
+               gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+               regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+                                      ctx->supplies);
+               return ret;
+       }
+
+       ctx->prepared = true;
+       return 0;
+}
+
+static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
+{
+       struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+       struct device *dev = &ctx->dsi->dev;
+       int ret;
+
+       if (!ctx->prepared)
+               return 0;
+
+       ret = tm5p5_nt35596_off(ctx);
+       if (ret < 0)
+               dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+                              ctx->supplies);
+
+       ctx->prepared = false;
+       return 0;
+}
+
+static const struct drm_display_mode tm5p5_nt35596_mode = {
+       .clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
+       .hdisplay = 1080,
+       .hsync_start = 1080 + 100,
+       .hsync_end = 1080 + 100 + 8,
+       .htotal = 1080 + 100 + 8 + 16,
+       .vdisplay = 1920,
+       .vsync_start = 1920 + 4,
+       .vsync_end = 1920 + 4 + 2,
+       .vtotal = 1920 + 4 + 2 + 4,
+       .vrefresh = 60,
+       .width_mm = 68,
+       .height_mm = 121,
+};
+
+static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
+                                  struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
+       if (!mode)
+               return -ENOMEM;
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       connector->display_info.width_mm = mode->width_mm;
+       connector->display_info.height_mm = mode->height_mm;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
+       .prepare = tm5p5_nt35596_prepare,
+       .unprepare = tm5p5_nt35596_unprepare,
+       .get_modes = tm5p5_nt35596_get_modes,
+};
+
+static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       u16 brightness = bl->props.brightness;
+       int ret;
+
+       if (bl->props.power != FB_BLANK_UNBLANK ||
+           bl->props.fb_blank != FB_BLANK_UNBLANK ||
+           bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+               brightness = 0;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return 0;
+}
+
+static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       u16 brightness = bl->props.brightness;
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return brightness & 0xff;
+}
+
+static const struct backlight_ops tm5p5_nt35596_bl_ops = {
+       .update_status = tm5p5_nt35596_bl_update_status,
+       .get_brightness = tm5p5_nt35596_bl_get_brightness,
+};
+
+static struct backlight_device *
+tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       const struct backlight_properties props = {
+               .type = BACKLIGHT_RAW,
+               .brightness = 255,
+               .max_brightness = 255,
+       };
+
+       return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+                                             &tm5p5_nt35596_bl_ops, &props);
+}
+
+static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct tm5p5_nt35596 *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->supplies[0].supply = "vdd";
+       ctx->supplies[1].supply = "vddio";
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+                                     ctx->supplies);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get regulators: %d\n", ret);
+               return ret;
+       }
+
+       ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               ret = PTR_ERR(ctx->reset_gpio);
+               dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+               return ret;
+       }
+
+       ctx->dsi = dsi;
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+                         MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET |
+                         MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+       drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+
+       ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
+       if (IS_ERR(ctx->panel.backlight)) {
+               ret = PTR_ERR(ctx->panel.backlight);
+               dev_err(dev, "Failed to create backlight: %d\n", ret);
+               return ret;
+       }
+
+       ret = drm_panel_add(&ctx->panel);
+       if (ret < 0) {
+               dev_err(dev, "Failed to add panel: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+{
+       struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               dev_err(&dsi->dev,
+                       "Failed to detach from DSI host: %d\n", ret);
+
+       drm_panel_remove(&ctx->panel);
+
+       return 0;
+}
+
+static const struct of_device_id tm5p5_nt35596_of_match[] = {
+       { .compatible = "asus,z00t-tm5p5-n35596" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);
+
+static struct mipi_dsi_driver tm5p5_nt35596_driver = {
+       .probe = tm5p5_nt35596_probe,
+       .remove = tm5p5_nt35596_remove,
+       .driver = {
+               .name = "panel-tm5p5-nt35596",
+               .of_match_table = tm5p5_nt35596_of_match,
+       },
+};
+module_mipi_dsi_driver(tm5p5_nt35596_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konradybcio@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
index 48a164257d18c3571ceec393682e3c1bf77426cc..46fe1805c588080130baf4ac187342ce0e7351fc 100644 (file)
@@ -696,6 +696,34 @@ static const struct panel_desc auo_b101uan08_3_desc = {
        .init_cmds = auo_b101uan08_3_init_cmd,
 };
 
+static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+       .clock = 159916,
+       .hdisplay = 1200,
+       .hsync_start = 1200 + 80,
+       .hsync_end = 1200 + 80 + 24,
+       .htotal = 1200 + 80 + 24 + 60,
+       .vdisplay = 1920,
+       .vsync_start = 1920 + 20,
+       .vsync_end = 1920 + 20 + 4,
+       .vtotal = 1920 + 20 + 4 + 10,
+       .vrefresh = 60,
+       .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv105wum_nw0_desc = {
+       .modes = &boe_tv105wum_nw0_default_mode,
+       .bpc = 8,
+       .size = {
+               .width_mm = 141,
+               .height_mm = 226,
+       },
+       .lanes = 4,
+       .format = MIPI_DSI_FMT_RGB888,
+       .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                     MIPI_DSI_MODE_LPM,
+       .init_cmds = boe_init_cmd,
+};
+
 static int boe_panel_get_modes(struct drm_panel *panel,
                               struct drm_connector *connector)
 {
@@ -834,6 +862,9 @@ static const struct of_device_id boe_of_match[] = {
        { .compatible = "auo,b101uan08.3",
          .data = &auo_b101uan08_3_desc
        },
+       { .compatible = "boe,tv105wum-nw0",
+         .data = &boe_tv105wum_nw0_desc
+       },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, boe_of_match);
index 09935520e606359a7ad0fe28e19dc065698e29a7..873b1c7059bd8a1ffd4b474a0c0bf7174fa080fb 100644 (file)
@@ -379,7 +379,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
                                "can't set up VCOM amplitude (%d)\n", ret);
                        return ret;
                }
-       };
+       }
 
        if (ili->vcom_high != U8_MAX) {
                ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
@@ -388,7 +388,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
                        dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
                        return ret;
                }
-       };
+       }
 
        /* Set up gamma correction */
        for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
new file mode 100644 (file)
index 0000000..5a7a31c
--- /dev/null
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk050h3146w_cmd {
+       char cmd;
+       char data;
+};
+
+struct ltk050h3146w;
+struct ltk050h3146w_desc {
+       const struct drm_display_mode *mode;
+       int (*init)(struct ltk050h3146w *ctx);
+};
+
+struct ltk050h3146w {
+       struct device *dev;
+       struct drm_panel panel;
+       struct gpio_desc *reset_gpio;
+       struct regulator *vci;
+       struct regulator *iovcc;
+       const struct ltk050h3146w_desc *panel_desc;
+       bool prepared;
+};
+
+static const struct ltk050h3146w_cmd page1_cmds[] = {
+       { 0x22, 0x0A }, /* BGR SS GS */
+       { 0x31, 0x00 }, /* column inversion */
+       { 0x53, 0xA2 }, /* VCOM1 */
+       { 0x55, 0xA2 }, /* VCOM2 */
+       { 0x50, 0x81 }, /* VREG1OUT=5V */
+       { 0x51, 0x85 }, /* VREG2OUT=-5V */
+       { 0x62, 0x0D }, /* EQT Time setting */
+/*
+ * The vendor init selected page 1 here _again_
+ * Is this supposed to be page 2?
+ */
+       { 0xA0, 0x00 },
+       { 0xA1, 0x1A },
+       { 0xA2, 0x28 },
+       { 0xA3, 0x13 },
+       { 0xA4, 0x16 },
+       { 0xA5, 0x29 },
+       { 0xA6, 0x1D },
+       { 0xA7, 0x1E },
+       { 0xA8, 0x84 },
+       { 0xA9, 0x1C },
+       { 0xAA, 0x28 },
+       { 0xAB, 0x75 },
+       { 0xAC, 0x1A },
+       { 0xAD, 0x19 },
+       { 0xAE, 0x4D },
+       { 0xAF, 0x22 },
+       { 0xB0, 0x28 },
+       { 0xB1, 0x54 },
+       { 0xB2, 0x66 },
+       { 0xB3, 0x39 },
+       { 0xC0, 0x00 },
+       { 0xC1, 0x1A },
+       { 0xC2, 0x28 },
+       { 0xC3, 0x13 },
+       { 0xC4, 0x16 },
+       { 0xC5, 0x29 },
+       { 0xC6, 0x1D },
+       { 0xC7, 0x1E },
+       { 0xC8, 0x84 },
+       { 0xC9, 0x1C },
+       { 0xCA, 0x28 },
+       { 0xCB, 0x75 },
+       { 0xCC, 0x1A },
+       { 0xCD, 0x19 },
+       { 0xCE, 0x4D },
+       { 0xCF, 0x22 },
+       { 0xD0, 0x28 },
+       { 0xD1, 0x54 },
+       { 0xD2, 0x66 },
+       { 0xD3, 0x39 },
+};
+
+static const struct ltk050h3146w_cmd page3_cmds[] = {
+       { 0x01, 0x00 },
+       { 0x02, 0x00 },
+       { 0x03, 0x73 },
+       { 0x04, 0x00 },
+       { 0x05, 0x00 },
+       { 0x06, 0x0a },
+       { 0x07, 0x00 },
+       { 0x08, 0x00 },
+       { 0x09, 0x01 },
+       { 0x0a, 0x00 },
+       { 0x0b, 0x00 },
+       { 0x0c, 0x01 },
+       { 0x0d, 0x00 },
+       { 0x0e, 0x00 },
+       { 0x0f, 0x1d },
+       { 0x10, 0x1d },
+       { 0x11, 0x00 },
+       { 0x12, 0x00 },
+       { 0x13, 0x00 },
+       { 0x14, 0x00 },
+       { 0x15, 0x00 },
+       { 0x16, 0x00 },
+       { 0x17, 0x00 },
+       { 0x18, 0x00 },
+       { 0x19, 0x00 },
+       { 0x1a, 0x00 },
+       { 0x1b, 0x00 },
+       { 0x1c, 0x00 },
+       { 0x1d, 0x00 },
+       { 0x1e, 0x40 },
+       { 0x1f, 0x80 },
+       { 0x20, 0x06 },
+       { 0x21, 0x02 },
+       { 0x22, 0x00 },
+       { 0x23, 0x00 },
+       { 0x24, 0x00 },
+       { 0x25, 0x00 },
+       { 0x26, 0x00 },
+       { 0x27, 0x00 },
+       { 0x28, 0x33 },
+       { 0x29, 0x03 },
+       { 0x2a, 0x00 },
+       { 0x2b, 0x00 },
+       { 0x2c, 0x00 },
+       { 0x2d, 0x00 },
+       { 0x2e, 0x00 },
+       { 0x2f, 0x00 },
+       { 0x30, 0x00 },
+       { 0x31, 0x00 },
+       { 0x32, 0x00 },
+       { 0x33, 0x00 },
+       { 0x34, 0x04 },
+       { 0x35, 0x00 },
+       { 0x36, 0x00 },
+       { 0x37, 0x00 },
+       { 0x38, 0x3C },
+       { 0x39, 0x35 },
+       { 0x3A, 0x01 },
+       { 0x3B, 0x40 },
+       { 0x3C, 0x00 },
+       { 0x3D, 0x01 },
+       { 0x3E, 0x00 },
+       { 0x3F, 0x00 },
+       { 0x40, 0x00 },
+       { 0x41, 0x88 },
+       { 0x42, 0x00 },
+       { 0x43, 0x00 },
+       { 0x44, 0x1F },
+       { 0x50, 0x01 },
+       { 0x51, 0x23 },
+       { 0x52, 0x45 },
+       { 0x53, 0x67 },
+       { 0x54, 0x89 },
+       { 0x55, 0xab },
+       { 0x56, 0x01 },
+       { 0x57, 0x23 },
+       { 0x58, 0x45 },
+       { 0x59, 0x67 },
+       { 0x5a, 0x89 },
+       { 0x5b, 0xab },
+       { 0x5c, 0xcd },
+       { 0x5d, 0xef },
+       { 0x5e, 0x11 },
+       { 0x5f, 0x01 },
+       { 0x60, 0x00 },
+       { 0x61, 0x15 },
+       { 0x62, 0x14 },
+       { 0x63, 0x0E },
+       { 0x64, 0x0F },
+       { 0x65, 0x0C },
+       { 0x66, 0x0D },
+       { 0x67, 0x06 },
+       { 0x68, 0x02 },
+       { 0x69, 0x07 },
+       { 0x6a, 0x02 },
+       { 0x6b, 0x02 },
+       { 0x6c, 0x02 },
+       { 0x6d, 0x02 },
+       { 0x6e, 0x02 },
+       { 0x6f, 0x02 },
+       { 0x70, 0x02 },
+       { 0x71, 0x02 },
+       { 0x72, 0x02 },
+       { 0x73, 0x02 },
+       { 0x74, 0x02 },
+       { 0x75, 0x01 },
+       { 0x76, 0x00 },
+       { 0x77, 0x14 },
+       { 0x78, 0x15 },
+       { 0x79, 0x0E },
+       { 0x7a, 0x0F },
+       { 0x7b, 0x0C },
+       { 0x7c, 0x0D },
+       { 0x7d, 0x06 },
+       { 0x7e, 0x02 },
+       { 0x7f, 0x07 },
+       { 0x80, 0x02 },
+       { 0x81, 0x02 },
+       { 0x82, 0x02 },
+       { 0x83, 0x02 },
+       { 0x84, 0x02 },
+       { 0x85, 0x02 },
+       { 0x86, 0x02 },
+       { 0x87, 0x02 },
+       { 0x88, 0x02 },
+       { 0x89, 0x02 },
+       { 0x8A, 0x02 },
+};
+
+static const struct ltk050h3146w_cmd page4_cmds[] = {
+       { 0x70, 0x00 },
+       { 0x71, 0x00 },
+       { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */
+       { 0x84, 0x0F }, /* VGH clamp level 15V */
+       { 0x85, 0x0D }, /* VGL clamp level (-10V) */
+       { 0x32, 0xAC },
+       { 0x8C, 0x80 },
+       { 0x3C, 0xF5 },
+       { 0xB5, 0x07 }, /* GAMMA OP */
+       { 0x31, 0x45 }, /* SOURCE OP */
+       { 0x3A, 0x24 }, /* PS_EN OFF */
+       { 0x88, 0x33 }, /* LVD */
+};
+
+static inline
+struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
+{
+       return container_of(panel, struct ltk050h3146w, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do {                       \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d));   \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       /*
+        * Init sequence was supplied by the panel vendor without much
+        * documentation.
+        */
+       dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+       dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+                         0x01);
+       dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+       dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+       dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+       dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+       dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+                         0x28, 0x04, 0xcc, 0xcc, 0xcc);
+       dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+       dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+       dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+       dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+       dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+                         0x80);
+       dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+                         0x16, 0x00, 0x00);
+       dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+                         0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+                         0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+                         0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+                         0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+       dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+                         0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+                         0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+                         0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+                         0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+                         0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+                         0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+       dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+                         0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+                         0x21, 0x00, 0x60);
+       dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+       dsi_dcs_write_seq(dsi, 0xde, 0x02);
+       dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+       dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+       dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+       dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+       dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+       dsi_dcs_write_seq(dsi, 0xde, 0x00);
+
+       ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+                             ret);
+               return ret;
+       }
+
+       msleep(60);
+
+       return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_mode = {
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 42,
+       .hsync_end      = 720 + 42 + 8,
+       .htotal         = 720 + 42 + 8 + 42,
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 12,
+       .vsync_end      = 1280 + 12 + 4,
+       .vtotal         = 1280 + 12 + 4 + 18,
+       .clock          = 64018,
+       .width_mm       = 62,
+       .height_mm      = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_data = {
+       .mode = &ltk050h3146w_mode,
+       .init = ltk050h3146w_init_sequence,
+};
+
+static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       u8 d[3] = { 0x98, 0x81, page };
+
+       return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+}
+
+static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+                                     const struct ltk050h3146w_cmd *cmds,
+                                     int num)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int i, ret;
+
+       ret = ltk050h3146w_a2_select_page(ctx, page);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
+                             page, ret);
+               return ret;
+       }
+
+       for (i = 0; i < num; i++) {
+               ret = mipi_dsi_generic_write(dsi, &cmds[i],
+                                            sizeof(struct ltk050h3146w_cmd));
+               if (ret < 0) {
+                       DRM_DEV_ERROR(ctx->dev,
+                                     "failed to write page %d init cmds: %d\n",
+                                      page, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       /*
+        * Init sequence was supplied by the panel vendor without much
+        * documentation.
+        */
+       ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+                                        ARRAY_SIZE(page3_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+                                        ARRAY_SIZE(page4_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+                                        ARRAY_SIZE(page1_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_select_page(ctx, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+               return ret;
+       }
+
+       /* vendor code called this without param, where there should be one */
+       ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+                             ret);
+               return ret;
+       }
+
+       msleep(60);
+
+       return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_a2_mode = {
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 42,
+       .hsync_end      = 720 + 42 + 10,
+       .htotal         = 720 + 42 + 10 + 60,
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 18,
+       .vsync_end      = 1280 + 18 + 4,
+       .vtotal         = 1280 + 18 + 4 + 12,
+       .clock          = 65595,
+       .width_mm       = 62,
+       .height_mm      = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_a2_data = {
+       .mode = &ltk050h3146w_a2_mode,
+       .init = ltk050h3146w_a2_init_sequence,
+};
+
+static int ltk050h3146w_unprepare(struct drm_panel *panel)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       if (!ctx->prepared)
+               return 0;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+                             ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi); /* was unchecked: ret not assigned */
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+                             ret);
+               return ret;
+       }
+
+       regulator_disable(ctx->iovcc);
+       regulator_disable(ctx->vci);
+
+       ctx->prepared = false;
+
+       return 0;
+}
+
+static int ltk050h3146w_prepare(struct drm_panel *panel)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+       ret = regulator_enable(ctx->vci);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev,
+                             "Failed to enable vci supply: %d\n", ret);
+               return ret;
+       }
+       ret = regulator_enable(ctx->iovcc);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev,
+                             "Failed to enable iovcc supply: %d\n", ret);
+               goto disable_vci;
+       }
+
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(5000, 6000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       msleep(20);
+
+       ret = ctx->panel_desc->init(ctx);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+                             ret);
+               goto disable_iovcc;
+       }
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+               goto disable_iovcc;
+       }
+
+       /* T9: 120ms */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_set_display_on(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+               goto disable_iovcc;
+       }
+
+       msleep(50);
+
+       ctx->prepared = true;
+
+       return 0;
+
+disable_iovcc:
+       regulator_disable(ctx->iovcc);
+disable_vci:
+       regulator_disable(ctx->vci);
+       return ret;
+}
+
+static int ltk050h3146w_get_modes(struct drm_panel *panel,
+                                 struct drm_connector *connector)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
+       if (!mode)
+               return -ENOMEM;
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       connector->display_info.width_mm = mode->width_mm;
+       connector->display_info.height_mm = mode->height_mm;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs ltk050h3146w_funcs = {
+       .unprepare      = ltk050h3146w_unprepare,
+       .prepare        = ltk050h3146w_prepare,
+       .get_modes      = ltk050h3146w_get_modes,
+};
+
+static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct ltk050h3146w *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->panel_desc = of_device_get_match_data(dev);
+       if (!ctx->panel_desc)
+               return -EINVAL;
+
+       ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+               return PTR_ERR(ctx->reset_gpio);
+       }
+
+       ctx->vci = devm_regulator_get(dev, "vci");
+       if (IS_ERR(ctx->vci)) {
+               ret = PTR_ERR(ctx->vci);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dev,
+                                     "Failed to request vci regulator: %d\n",
+                                     ret);
+               return ret;
+       }
+
+       ctx->iovcc = devm_regulator_get(dev, "iovcc");
+       if (IS_ERR(ctx->iovcc)) {
+               ret = PTR_ERR(ctx->iovcc);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dev,
+                                     "Failed to request iovcc regulator: %d\n",
+                                     ret);
+               return ret;
+       }
+
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       ctx->dev = dev;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+                         MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+       drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+
+       ret = drm_panel_of_backlight(&ctx->panel);
+       if (ret)
+               return ret;
+
+       drm_panel_add(&ctx->panel);
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+               drm_panel_remove(&ctx->panel);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = drm_panel_unprepare(&ctx->panel);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+                             ret);
+
+       ret = drm_panel_disable(&ctx->panel);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+                             ret);
+}
+
+static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+{
+       struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ltk050h3146w_shutdown(dsi);
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+                             ret);
+
+       drm_panel_remove(&ctx->panel);
+
+       return 0;
+}
+
+static const struct of_device_id ltk050h3146w_of_match[] = {
+       {
+               .compatible = "leadtek,ltk050h3146w",
+               .data = &ltk050h3146w_data,
+       },
+       {
+               .compatible = "leadtek,ltk050h3146w-a2",
+               .data = &ltk050h3146w_a2_data,
+       },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match);
+
+static struct mipi_dsi_driver ltk050h3146w_driver = {
+       .driver = {
+               .name = "panel-leadtek-ltk050h3146w",
+               .of_match_table = ltk050h3146w_of_match,
+       },
+       .probe  = ltk050h3146w_probe,
+       .remove = ltk050h3146w_remove,
+       .shutdown = ltk050h3146w_shutdown,
+};
+module_mipi_dsi_driver(ltk050h3146w_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
index 76ecf2de9c4457eb500b22b06dc807b00ed0b3ca..113ab9c0396b2099d2af04716066dc8a02bed889 100644 (file)
@@ -377,7 +377,7 @@ static const struct drm_display_mode default_mode = {
        .vsync_end      = 1280 + 30 + 4,
        .vtotal         = 1280 + 30 + 4 + 12,
        .vrefresh       = 60,
-       .clock          = 41600,
+       .clock          = 69217,
        .width_mm       = 62,
        .height_mm      = 110,
 };
index a470810f7dbef8ecd54595227e4c821a0397b758..05cae8d62d56329e65259929226fa0498683a067 100644 (file)
@@ -49,7 +49,8 @@ enum nt39016_regs {
 #define NT39016_SYSTEM_STANDBY BIT(1)
 
 struct nt39016_panel_info {
-       struct drm_display_mode display_mode;
+       const struct drm_display_mode *display_modes;
+       unsigned int num_modes;
        u16 width_mm, height_mm;
        u32 bus_format, bus_flags;
 };
@@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
        struct nt39016 *panel = to_nt39016(drm_panel);
        const struct nt39016_panel_info *panel_info = panel->panel_info;
        struct drm_display_mode *mode;
+       unsigned int i;
 
-       mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
-       if (!mode)
-               return -ENOMEM;
+       for (i = 0; i < panel_info->num_modes; i++) {
+               mode = drm_mode_duplicate(connector->dev,
+                                         &panel_info->display_modes[i]);
+               if (!mode)
+                       return -ENOMEM;
+
+               drm_mode_set_name(mode);
 
-       drm_mode_set_name(mode);
+               mode->type = DRM_MODE_TYPE_DRIVER;
+               if (panel_info->num_modes == 1)
+                       mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-       drm_mode_probed_add(connector, mode);
+               drm_mode_probed_add(connector, mode);
+       }
 
        connector->display_info.bpc = 8;
        connector->display_info.width_mm = panel_info->width_mm;
@@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
                                         &panel_info->bus_format, 1);
        connector->display_info.bus_flags = panel_info->bus_flags;
 
-       return 1;
+       return panel_info->num_modes;
 }
 
 static const struct drm_panel_funcs nt39016_funcs = {
@@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi)
        return 0;
 }
 
-static const struct nt39016_panel_info kd035g6_info = {
-       .display_mode = {
+static const struct drm_display_mode kd035g6_display_modes[] = {
+       {       /* 60 Hz */
                .clock = 6000,
                .hdisplay = 320,
                .hsync_start = 320 + 10,
@@ -330,6 +338,24 @@ static const struct nt39016_panel_info kd035g6_info = {
                .vrefresh = 60,
                .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
        },
+       {       /* 50 Hz */
+               .clock = 5400,
+               .hdisplay = 320,
+               .hsync_start = 320 + 42,
+               .hsync_end = 320 + 42 + 50,
+               .htotal = 320 + 42 + 50 + 20,
+               .vdisplay = 240,
+               .vsync_start = 240 + 5,
+               .vsync_end = 240 + 5 + 1,
+               .vtotal = 240 + 5 + 1 + 4,
+               .vrefresh = 50,
+               .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+       },
+};
+
+static const struct nt39016_panel_info kd035g6_info = {
+       .display_modes = kd035g6_display_modes,
+       .num_modes = ARRAY_SIZE(kd035g6_display_modes),
        .width_mm = 71,
        .height_mm = 53,
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
index 3ad828eaefe1ca1c5755b5f0835392aca00cafd8..b6ecd1552132ed308157faf16a77f4e65a7feee5 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
@@ -108,6 +109,7 @@ struct panel_simple {
        struct i2c_adapter *ddc;
 
        struct gpio_desc *enable_gpio;
+       struct gpio_desc *hpd_gpio;
 
        struct drm_display_mode override_mode;
 };
@@ -259,11 +261,37 @@ static int panel_simple_unprepare(struct drm_panel *panel)
        return 0;
 }
 
+static int panel_simple_get_hpd_gpio(struct device *dev,
+                                    struct panel_simple *p, bool from_probe)
+{
+       int err;
+
+       p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+       if (IS_ERR(p->hpd_gpio)) {
+               err = PTR_ERR(p->hpd_gpio);
+
+               /*
+                * If we're called from probe we won't consider '-EPROBE_DEFER'
+                * to be an error--we'll leave the error code in "hpd_gpio".
+                * When we try to use it we'll try again.  This allows for
+                * circular dependencies where the component providing the
+                * hpd gpio needs the panel to init before probing.
+                */
+               if (err != -EPROBE_DEFER || !from_probe) {
+                       dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
 static int panel_simple_prepare(struct drm_panel *panel)
 {
        struct panel_simple *p = to_panel_simple(panel);
        unsigned int delay;
        int err;
+       int hpd_asserted;
 
        if (p->prepared)
                return 0;
@@ -282,6 +310,26 @@ static int panel_simple_prepare(struct drm_panel *panel)
        if (delay)
                msleep(delay);
 
+       if (p->hpd_gpio) {
+               if (IS_ERR(p->hpd_gpio)) {
+                       err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+                       if (err)
+                               return err;
+               }
+
+               err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+                                        hpd_asserted, hpd_asserted,
+                                        1000, 2000000);
+               if (hpd_asserted < 0)
+                       err = hpd_asserted;
+
+               if (err) {
+                       dev_err(panel->dev,
+                               "error waiting for hpd GPIO: %d\n", err);
+                       return err;
+               }
+       }
+
        p->prepared = true;
 
        return 0;
@@ -462,6 +510,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
        panel->desc = desc;
 
        panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+       if (!panel->no_hpd) {
+               err = panel_simple_get_hpd_gpio(dev, panel, true);
+               if (err)
+                       return err;
+       }
 
        panel->supply = devm_regulator_get(dev, "power");
        if (IS_ERR(panel->supply))
@@ -836,7 +889,8 @@ static const struct panel_desc auo_g101evn010 = {
                .width = 216,
                .height = 135,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
 static const struct drm_display_mode auo_g104sn02_mode = {
@@ -862,6 +916,31 @@ static const struct panel_desc auo_g104sn02 = {
        },
 };
 
+static const struct drm_display_mode auo_g121ean01_mode = {
+       .clock = 66700,
+       .hdisplay = 1280,
+       .hsync_start = 1280 + 58,
+       .hsync_end = 1280 + 58 + 8,
+       .htotal = 1280 + 58 + 8 + 70,
+       .vdisplay = 800,
+       .vsync_start = 800 + 6,
+       .vsync_end = 800 + 6 + 4,
+       .vtotal = 800 + 6 + 4 + 10,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g121ean01 = {
+       .modes = &auo_g121ean01_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 261,
+               .height = 163,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g133han01_timings = {
        .pixelclock = { 134000000, 141200000, 149000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -892,6 +971,31 @@ static const struct panel_desc auo_g133han01 = {
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
+static const struct drm_display_mode auo_g156xtn01_mode = {
+       .clock = 76000,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 33,
+       .hsync_end = 1366 + 33 + 67,
+       .htotal = 1560,
+       .vdisplay = 768,
+       .vsync_start = 768 + 4,
+       .vsync_end = 768 + 4 + 4,
+       .vtotal = 806,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g156xtn01 = {
+       .modes = &auo_g156xtn01_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 344,
+               .height = 194,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g185han01_timings = {
        .pixelclock = { 120000000, 144000000, 175000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -922,6 +1026,36 @@ static const struct panel_desc auo_g185han01 = {
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
+static const struct display_timing auo_g190ean01_timings = {
+       .pixelclock = { 90000000, 108000000, 135000000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 126, 184, 1266 },
+       .hback_porch = { 84, 122, 844 },
+       .hsync_len = { 70, 102, 704 },
+       .vactive = { 1024, 1024, 1024 },
+       .vfront_porch = { 4, 26, 76 },
+       .vback_porch = { 2, 8, 25 },
+       .vsync_len = { 2, 8, 25 },
+};
+
+static const struct panel_desc auo_g190ean01 = {
+       .timings = &auo_g190ean01_timings,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 376,
+               .height = 301,
+       },
+       .delay = {
+               .prepare = 50,
+               .enable = 200,
+               .disable = 110,
+               .unprepare = 1000,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_p320hvn03_timings = {
        .pixelclock = { 106000000, 148500000, 164000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -1092,6 +1226,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
        },
 };
 
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+       .clock = 147840,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 48,
+       .hsync_end = 1920 + 48 + 32,
+       .htotal = 1920 + 48 + 32 + 200,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 6,
+       .vtotal = 1080 + 3 + 6 + 31,
+       .vrefresh = 60,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+       .modes = &boe_nv133fhm_n61_modes,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 294,
+               .height = 165,
+       },
+       .delay = {
+               .hpd_absent_delay = 200,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
        {
                .clock = 148500,
@@ -1980,6 +2146,37 @@ static const struct panel_desc innolux_zj070na_01p = {
        },
 };
 
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+       .clock = 138778,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 24,
+       .hsync_end = 1920 + 24 + 48,
+       .htotal = 1920 + 24 + 48 + 88,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 12,
+       .vtotal = 1080 + 3 + 12 + 17,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+       .modes = &ivo_m133nwf4_r0_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 294,
+               .height = 165,
+       },
+       .delay = {
+               .hpd_absent_delay = 200,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct display_timing koe_tx14d24vm1bpa_timing = {
        .pixelclock = { 5580000, 5850000, 6200000 },
        .hactive = { 320, 320, 320 },
@@ -2168,6 +2365,7 @@ static const struct panel_desc lg_lp120up1 = {
                .width = 267,
                .height = 183,
        },
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
 };
 
 static const struct drm_display_mode lg_lp129qe_mode = {
@@ -3065,6 +3263,32 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode starry_kr070pe2t_mode = {
+       .clock = 33000,
+       .hdisplay = 800,
+       .hsync_start = 800 + 209,
+       .hsync_end = 800 + 209 + 1,
+       .htotal = 800 + 209 + 1 + 45,
+       .vdisplay = 480,
+       .vsync_start = 480 + 22,
+       .vsync_end = 480 + 22 + 1,
+       .vtotal = 480 + 22 + 1 + 22,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc starry_kr070pe2t = {
+       .modes = &starry_kr070pe2t_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 86,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode starry_kr122ea0sra_mode = {
        .clock = 147000,
        .hdisplay = 1920,
@@ -3454,12 +3678,21 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "auo,g104sn02",
                .data = &auo_g104sn02,
+       }, {
+               .compatible = "auo,g121ean01",
+               .data = &auo_g121ean01,
        }, {
                .compatible = "auo,g133han01",
                .data = &auo_g133han01,
+       }, {
+               .compatible = "auo,g156xtn01",
+               .data = &auo_g156xtn01,
        }, {
                .compatible = "auo,g185han01",
                .data = &auo_g185han01,
+       }, {
+               .compatible = "auo,g190ean01",
+               .data = &auo_g190ean01,
        }, {
                .compatible = "auo,p320hvn03",
                .data = &auo_p320hvn03,
@@ -3478,6 +3711,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "boe,nv101wxmn51",
                .data = &boe_nv101wxmn51,
+       }, {
+               .compatible = "boe,nv133fhm-n61",
+               .data = &boe_nv133fhm_n61,
+       }, {
+               .compatible = "boe,nv133fhm-n62",
+               .data = &boe_nv133fhm_n61,
        }, {
                .compatible = "boe,nv140fhmn49",
                .data = &boe_nv140fhmn49,
@@ -3586,6 +3825,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "innolux,zj070na-01p",
                .data = &innolux_zj070na_01p,
+       }, {
+               .compatible = "ivo,m133nwf4-r0",
+               .data = &ivo_m133nwf4_r0,
        }, {
                .compatible = "koe,tx14d24vm1bpa",
                .data = &koe_tx14d24vm1bpa,
@@ -3715,6 +3957,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "shelly,sca07010-bfn-lnn",
                .data = &shelly_sca07010_bfn_lnn,
+       }, {
+               .compatible = "starry,kr070pe2t",
+               .data = &starry_kr070pe2t,
        }, {
                .compatible = "starry,kr122ea0sra",
                .data = &starry_kr122ea0sra,
index 012ca62bf30e6939966a465f2bad9142b3adbc7d..f0ad6081570f570c548cbcea7a5290c91b1cd052 100644 (file)
@@ -490,9 +490,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
 {
        struct device *dev = ctx->dev;
        int ret, i;
-       const struct nt35597_config *config;
 
-       config = ctx->config;
        for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
                ctx->supplies[i].supply = regulator_names[i];
 
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
new file mode 100644 (file)
index 0000000..42f299a
--- /dev/null
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct visionox_rm69299 {
+       struct drm_panel panel;
+       struct regulator_bulk_data supplies[2];
+       struct gpio_desc *reset_gpio;
+       struct mipi_dsi_device *dsi;
+       bool prepared;
+       bool enabled;
+};
+
+static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
+{
+       return container_of(panel, struct visionox_rm69299, panel);
+}
+
+static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
+{
+       int ret;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Reset sequence of visionox panel requires the panel to be
+        * out of reset for 10ms, followed by being held in reset
+        * for 10ms and then out again
+        */
+       gpiod_set_value(ctx->reset_gpio, 1);
+       usleep_range(10000, 20000);
+       gpiod_set_value(ctx->reset_gpio, 0);
+       usleep_range(10000, 20000);
+       gpiod_set_value(ctx->reset_gpio, 1);
+       usleep_range(10000, 20000);
+
+       return 0;
+}
+
+static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
+{
+       gpiod_set_value(ctx->reset_gpio, 0);
+
+       return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int visionox_rm69299_unprepare(struct drm_panel *panel)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       int ret;
+
+       ctx->dsi->mode_flags = 0;
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+       if (ret < 0)
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "set_display_off cmd failed ret = %d\n", ret);
+
+       /* 120ms delay required here as per DCS spec */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "enter_sleep cmd failed ret = %d\n", ret);
+       }
+
+       ret = visionox_rm69299_power_off(ctx);
+
+       ctx->prepared = false;
+       return ret;
+}
+
+static int visionox_rm69299_prepare(struct drm_panel *panel)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       ret = visionox_rm69299_power_on(ctx);
+       if (ret < 0)
+               return ret;
+
+       ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 0 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 1 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 2 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 3 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "exit_sleep_mode cmd failed ret = %d\n", ret);
+               goto power_off;
+       }
+
+       /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "set_display_on cmd failed ret = %d\n", ret);
+               goto power_off;
+       }
+
+       /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+       msleep(120);
+
+       ctx->prepared = true;
+
+       return 0;
+
+power_off:
+       return ret;
+}
+
+static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
+       .name = "1080x2248",
+       .clock = 158695,
+       .hdisplay = 1080,
+       .hsync_start = 1080 + 26,
+       .hsync_end = 1080 + 26 + 2,
+       .htotal = 1080 + 26 + 2 + 36,
+       .vdisplay = 2248,
+       .vsync_start = 2248 + 56,
+       .vsync_end = 2248 + 56 + 4,
+       .vtotal = 2248 + 56 + 4 + 4,
+       .vrefresh = 60,
+       .flags = 0,
+};
+
+static int visionox_rm69299_get_modes(struct drm_panel *panel,
+                                     struct drm_connector *connector)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_create(connector->dev);
+       if (!mode) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "failed to create a new display mode\n");
+               return 0;
+       }
+
+       connector->display_info.width_mm = 74;
+       connector->display_info.height_mm = 131;
+       drm_mode_copy(mode, &visionox_rm69299_1080x2248_60hz);
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
+       .unprepare = visionox_rm69299_unprepare,
+       .prepare = visionox_rm69299_prepare,
+       .get_modes = visionox_rm69299_get_modes,
+};
+
+static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct visionox_rm69299 *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       ctx->panel.dev = dev;
+       ctx->dsi = dsi;
+
+       ctx->supplies[0].supply = "vdda";
+       ctx->supplies[1].supply = "vdd3p3";
+
+       ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
+                                     ctx->supplies);
+       if (ret < 0)
+               return ret;
+
+       ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
+                                        "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+                             PTR_ERR(ctx->reset_gpio));
+               return PTR_ERR(ctx->reset_gpio);
+       }
+
+       drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+       ctx->panel.dev = dev;
+       ctx->panel.funcs = &visionox_rm69299_drm_funcs;
+       drm_panel_add(&ctx->panel);
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+                         MIPI_DSI_CLOCK_NON_CONTINUOUS;
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+               goto err_dsi_attach;
+       }
+
+       ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
+       if (ret) {
+               DRM_DEV_ERROR(dev,
+                             "regulator set load failed for vdda supply ret = %d\n",
+                             ret);
+               goto err_set_load;
+       }
+
+       ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
+       if (ret) {
+               DRM_DEV_ERROR(dev,
+                             "regulator set load failed for vdd3p3 supply ret = %d\n",
+                             ret);
+               goto err_set_load;
+       }
+
+       return 0;
+
+err_set_load:
+       mipi_dsi_detach(dsi);
+err_dsi_attach:
+       drm_panel_remove(&ctx->panel);
+       return ret;
+}
+
+static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+{
+       struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+       mipi_dsi_detach(ctx->dsi);
+       mipi_dsi_device_unregister(ctx->dsi);
+
+       drm_panel_remove(&ctx->panel);
+       return 0;
+}
+
+static const struct of_device_id visionox_rm69299_of_match[] = {
+       { .compatible = "visionox,rm69299-1080p-display", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
+
+static struct mipi_dsi_driver visionox_rm69299_driver = {
+       .driver = {
+               .name = "panel-visionox-rm69299",
+               .of_match_table = visionox_rm69299_of_match,
+       },
+       .probe = visionox_rm69299_probe,
+       .remove = visionox_rm69299_remove,
+};
+module_mipi_dsi_driver(visionox_rm69299_driver);
+
+MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
index 0c70f0e91d218aad577dfe02ff62efc91cd89edd..67d430d433e0c1f5a27143e2cfde1f9364a1ed6c 100644 (file)
@@ -3,7 +3,6 @@ pl111_drm-y +=  pl111_display.o \
                pl111_versatile.o \
                pl111_drv.o
 
-pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
 pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
 pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
 
index 3c8e820168543755698bbbbdae7cbee634177dd1..26ca8cdf3e6064093a7fe77a3bdef70ed5f1616f 100644 (file)
@@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = {
        {"regs", pl111_debugfs_regs, 0},
 };
 
-int
+void
 pl111_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(pl111_debugfs_list,
-                                       ARRAY_SIZE(pl111_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(pl111_debugfs_list,
+                                ARRAY_SIZE(pl111_debugfs_list),
+                                minor->debugfs_root, minor);
 }
index 77d2da9a8a7ca829e1672aa386f08e9fb0a4ef18..ba399bcb792f4fa233766ea5263c4ef44a6171ff 100644 (file)
@@ -84,6 +84,6 @@ struct pl111_drm_dev_private {
 
 int pl111_display_init(struct drm_device *dev);
 irqreturn_t pl111_irq(int irq, void *data);
-int pl111_debugfs_init(struct drm_minor *minor);
+void pl111_debugfs_init(struct drm_minor *minor);
 
 #endif /* _PL111_DRM_H_ */
index aa8aa8d9e405a4cf92bdd831eb2cf0a39fd64805..da0c39dae874779fd17e633ffd69c00bc363eb81 100644 (file)
@@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev)
        struct drm_panel *panel = NULL;
        struct drm_bridge *bridge = NULL;
        bool defer = false;
-       int ret = 0;
+       int ret;
        int i;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
+
        mode_config = &dev->mode_config;
        mode_config->funcs = &mode_config_funcs;
        mode_config->min_width = 1;
@@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev)
                                                    DRM_MODE_CONNECTOR_Unknown);
                if (IS_ERR(bridge)) {
                        ret = PTR_ERR(bridge);
-                       goto out_config;
+                       goto finish;
                }
        } else if (bridge) {
                dev_info(dev->dev, "Using non-panel bridge\n");
@@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev)
 out_bridge:
        if (panel)
                drm_panel_bridge_remove(bridge);
-out_config:
-       drm_mode_config_cleanup(dev);
 finish:
        return ret;
 }
@@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
        drm_dev_unregister(drm);
        if (priv->panel)
                drm_panel_bridge_remove(priv->bridge);
-       drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 
@@ -444,6 +444,7 @@ static const struct amba_id pl111_id_table[] = {
        },
        {0, 0},
 };
+MODULE_DEVICE_TABLE(amba, pl111_id_table);
 
 static struct amba_driver pl111_amba_driver __maybe_unused = {
        .drv = {
index 4f325c410b5d1cab92f155f173ce2b5f2fc51d7a..64f01a4e6767fa6ce9bbb33314edbbbd8768f42d 100644 (file)
@@ -8,9 +8,9 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/regmap.h>
+#include <linux/vexpress.h>
 
 #include "pl111_versatile.h"
-#include "pl111_vexpress.h"
 #include "pl111_drm.h"
 
 static struct regmap *versatile_syscon_map;
@@ -361,13 +361,110 @@ static const struct pl111_variant_data pl111_vexpress = {
        .broken_clockdivider = true,
 };
 
+#define VEXPRESS_FPGAMUX_MOTHERBOARD           0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1       0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2       0x02
+
+static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
+                                   struct pl111_drm_dev_private *priv)
+{
+       struct platform_device *pdev;
+       struct device_node *root;
+       struct device_node *child;
+       struct device_node *ct_clcd = NULL;
+       struct regmap *map;
+       bool has_coretile_clcd = false;
+       bool has_coretile_hdlcd = false;
+       bool mux_motherboard = true;
+       u32 val;
+       int ret;
+
+       if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
+               return -ENODEV;
+
+       /*
+        * Check if we have a CLCD or HDLCD on the core tile by checking if a
+        * CLCD or HDLCD is available in the root of the device tree.
+        */
+       root = of_find_node_by_path("/");
+       if (!root)
+               return -EINVAL;
+
+       for_each_available_child_of_node(root, child) {
+               if (of_device_is_compatible(child, "arm,pl111")) {
+                       has_coretile_clcd = true;
+                       ct_clcd = child;
+                       break;
+               }
+               if (of_device_is_compatible(child, "arm,hdlcd")) {
+                       has_coretile_hdlcd = true;
+                       of_node_put(child);
+                       break;
+               }
+       }
+
+       of_node_put(root);
+
+       /*
+        * If there is a coretile HDLCD and it has a driver,
+        * do not mux the CLCD on the motherboard to the DVI.
+        */
+       if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+               mux_motherboard = false;
+
+       /*
+        * On the Vexpress CA9 we let the CLCD on the coretile
+        * take precedence, so also in this case do not mux the
+        * motherboard to the DVI.
+        */
+       if (has_coretile_clcd)
+               mux_motherboard = false;
+
+       if (mux_motherboard) {
+               dev_info(dev, "DVI muxed to motherboard CLCD\n");
+               val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+       } else if (ct_clcd == dev->of_node) {
+               dev_info(dev,
+                        "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+               val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+       } else {
+               dev_info(dev, "core tile graphics present\n");
+               dev_info(dev, "this device will be deactivated\n");
+               return -ENODEV;
+       }
+
+       /* Call into deep Vexpress configuration API */
+       pdev = of_find_device_by_node(np);
+       if (!pdev) {
+               dev_err(dev, "can't find the sysreg device, deferring\n");
+               return -EPROBE_DEFER;
+       }
+
+       map = devm_regmap_init_vexpress_config(&pdev->dev);
+       if (IS_ERR(map)) {
+               platform_device_put(pdev);
+               return PTR_ERR(map);
+       }
+
+       ret = regmap_write(map, 0, val);
+       platform_device_put(pdev);
+       if (ret) {
+               dev_err(dev, "error setting DVI muxmode\n");
+               return -ENODEV;
+       }
+
+       priv->variant = &pl111_vexpress;
+       dev_info(dev, "initializing Versatile Express PL111\n");
+
+       return 0;
+}
+
 int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 {
        const struct of_device_id *clcd_id;
        enum versatile_clcd versatile_clcd_type;
        struct device_node *np;
        struct regmap *map;
-       int ret;
 
        np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
                                             &clcd_id);
@@ -378,6 +475,15 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 
        versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
 
+       /* Versatile Express special handling */
+       if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+               int ret = pl111_vexpress_clcd_init(dev, np, priv);
+               of_node_put(np);
+               if (ret)
+                       dev_err(dev, "Versatile Express init failed - %d", ret);
+               return ret;
+       }
+
        /*
         * On the Integrator, check if we should use the IM-PD1 instead,
         * if we find it, it will take precedence. This is on the Integrator/AP
@@ -390,37 +496,8 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
                        versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
        }
 
-       /* Versatile Express special handling */
-       if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
-               struct platform_device *pdev;
-
-               /* Registers a driver for the muxfpga */
-               ret = vexpress_muxfpga_init();
-               if (ret) {
-                       dev_err(dev, "unable to initialize muxfpga driver\n");
-                       of_node_put(np);
-                       return ret;
-               }
-
-               /* Call into deep Vexpress configuration API */
-               pdev = of_find_device_by_node(np);
-               if (!pdev) {
-                       dev_err(dev, "can't find the sysreg device, deferring\n");
-                       of_node_put(np);
-                       return -EPROBE_DEFER;
-               }
-               map = dev_get_drvdata(&pdev->dev);
-               if (!map) {
-                       dev_err(dev, "sysreg has not yet probed\n");
-                       platform_device_put(pdev);
-                       of_node_put(np);
-                       return -EPROBE_DEFER;
-               }
-       } else {
-               map = syscon_node_to_regmap(np);
-       }
+       map = syscon_node_to_regmap(np);
        of_node_put(np);
-
        if (IS_ERR(map)) {
                dev_err(dev, "no Versatile syscon regmap\n");
                return PTR_ERR(map);
@@ -466,13 +543,6 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
                priv->variant_display_disable = pl111_realview_clcd_disable;
                dev_info(dev, "set up callbacks for RealView PL111\n");
                break;
-       case VEXPRESS_CLCD_V2M:
-               priv->variant = &pl111_vexpress;
-               dev_info(dev, "initializing Versatile Express PL111\n");
-               ret = pl111_vexpress_clcd_init(dev, priv, map);
-               if (ret)
-                       return ret;
-               break;
        default:
                dev_info(dev, "unknown Versatile system controller\n");
                break;
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
deleted file mode 100644 (file)
index 350570f..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Versatile Express PL111 handling
- * Copyright (C) 2018 Linus Walleij
- *
- * This module binds to the "arm,vexpress-muxfpga" device on the
- * Versatile Express configuration bus and sets up which CLCD instance
- * gets muxed out on the DVI bridge.
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/vexpress.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include "pl111_drm.h"
-#include "pl111_vexpress.h"
-
-#define VEXPRESS_FPGAMUX_MOTHERBOARD           0x00
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1       0x01
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2       0x02
-
-int pl111_vexpress_clcd_init(struct device *dev,
-                            struct pl111_drm_dev_private *priv,
-                            struct regmap *map)
-{
-       struct device_node *root;
-       struct device_node *child;
-       struct device_node *ct_clcd = NULL;
-       bool has_coretile_clcd = false;
-       bool has_coretile_hdlcd = false;
-       bool mux_motherboard = true;
-       u32 val;
-       int ret;
-
-       /*
-        * Check if we have a CLCD or HDLCD on the core tile by checking if a
-        * CLCD or HDLCD is available in the root of the device tree.
-        */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return -EINVAL;
-
-       for_each_available_child_of_node(root, child) {
-               if (of_device_is_compatible(child, "arm,pl111")) {
-                       has_coretile_clcd = true;
-                       ct_clcd = child;
-                       break;
-               }
-               if (of_device_is_compatible(child, "arm,hdlcd")) {
-                       has_coretile_hdlcd = true;
-                       of_node_put(child);
-                       break;
-               }
-       }
-
-       of_node_put(root);
-
-       /*
-        * If there is a coretile HDLCD and it has a driver,
-        * do not mux the CLCD on the motherboard to the DVI.
-        */
-       if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
-               mux_motherboard = false;
-
-       /*
-        * On the Vexpress CA9 we let the CLCD on the coretile
-        * take precedence, so also in this case do not mux the
-        * motherboard to the DVI.
-        */
-       if (has_coretile_clcd)
-               mux_motherboard = false;
-
-       if (mux_motherboard) {
-               dev_info(dev, "DVI muxed to motherboard CLCD\n");
-               val = VEXPRESS_FPGAMUX_MOTHERBOARD;
-       } else if (ct_clcd == dev->of_node) {
-               dev_info(dev,
-                        "DVI muxed to daughterboard 1 (core tile) CLCD\n");
-               val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
-       } else {
-               dev_info(dev, "core tile graphics present\n");
-               dev_info(dev, "this device will be deactivated\n");
-               return -ENODEV;
-       }
-
-       ret = regmap_write(map, 0, val);
-       if (ret) {
-               dev_err(dev, "error setting DVI muxmode\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/*
- * This sets up the regmap pointer that will then be retrieved by
- * the detection code in pl111_versatile.c and passed in to the
- * pl111_vexpress_clcd_init() function above.
- */
-static int vexpress_muxfpga_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct regmap *map;
-
-       map = devm_regmap_init_vexpress_config(&pdev->dev);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-       dev_set_drvdata(dev, map);
-
-       return 0;
-}
-
-static const struct of_device_id vexpress_muxfpga_match[] = {
-       { .compatible = "arm,vexpress-muxfpga", },
-       {}
-};
-
-static struct platform_driver vexpress_muxfpga_driver = {
-       .driver = {
-               .name = "vexpress-muxfpga",
-               .of_match_table = of_match_ptr(vexpress_muxfpga_match),
-       },
-       .probe = vexpress_muxfpga_probe,
-};
-
-int vexpress_muxfpga_init(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&vexpress_muxfpga_driver);
-       /* -EBUSY just means this driver is already registered */
-       if (ret == -EBUSY)
-               ret = 0;
-       return ret;
-}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
deleted file mode 100644 (file)
index 5d3681b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-struct device;
-struct pl111_drm_dev_private;
-struct regmap;
-
-#ifdef CONFIG_ARCH_VEXPRESS
-
-int pl111_vexpress_clcd_init(struct device *dev,
-                            struct pl111_drm_dev_private *priv,
-                            struct regmap *map);
-
-int vexpress_muxfpga_init(void);
-
-#else
-
-static inline int pl111_vexpress_clcd_init(struct device *dev,
-                                          struct pl111_drm_dev_private *priv,
-                                          struct regmap *map)
-{
-       return -ENODEV;
-}
-
-static inline int vexpress_muxfpga_init(void)
-{
-       return 0;
-}
-
-#endif
index a4f4175bbdbeda019eac936187c87c884cbd1539..524d35b648d885045eecfb25bec0bbb7f8a5b569 100644 (file)
@@ -39,7 +39,7 @@ static int
 qxl_debugfs_irq_received(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(node->minor->dev);
 
        seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
        seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
@@ -53,7 +53,7 @@ static int
 qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(node->minor->dev);
        struct qxl_bo *bo;
 
        list_for_each_entry(bo, &qdev->gem.objects, list) {
@@ -79,36 +79,29 @@ static struct drm_info_list qxl_debugfs_list[] = {
 #define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
 #endif
 
-int
+void
 qxl_debugfs_init(struct drm_minor *minor)
 {
 #if defined(CONFIG_DEBUG_FS)
-       int r;
-       struct qxl_device *dev =
-               (struct qxl_device *) minor->dev->dev_private;
+       struct qxl_device *dev = to_qxl(minor->dev);
 
        drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
                                 minor->debugfs_root, minor);
 
-       r = qxl_ttm_debugfs_init(dev);
-       if (r) {
-               DRM_ERROR("Failed to init TTM debugfs\n");
-               return r;
-       }
+       qxl_ttm_debugfs_init(dev);
 #endif
-       return 0;
 }
 
-int qxl_debugfs_add_files(struct qxl_device *qdev,
-                         struct drm_info_list *files,
-                         unsigned int nfiles)
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+                          struct drm_info_list *files,
+                          unsigned int nfiles)
 {
        unsigned int i;
 
        for (i = 0; i < qdev->debugfs_count; i++) {
                if (qdev->debugfs[i].files == files) {
                        /* Already registered */
-                       return 0;
+                       return;
                }
        }
 
@@ -116,7 +109,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
        if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
                DRM_ERROR("Reached maximum number of debugfs components.\n");
                DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
-               return -EINVAL;
+               return;
        }
        qdev->debugfs[qdev->debugfs_count].files = files;
        qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
@@ -126,5 +119,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
                                 qdev->ddev.primary->debugfs_root,
                                 qdev->ddev.primary);
 #endif
-       return 0;
 }
index 09583a08e1414381e759f91cb1c70d0ee26251f8..1082cd5d2fd47c20aeff3ecf10d079ff4c3c385b 100644 (file)
@@ -221,7 +221,7 @@ static int qxl_add_mode(struct drm_connector *connector,
                        bool preferred)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_display_mode *mode = NULL;
        int rc;
 
@@ -242,7 +242,7 @@ static int qxl_add_mode(struct drm_connector *connector,
 static int qxl_add_monitors_config_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *output = drm_connector_to_qxl_output(connector);
        int h = output->index;
        struct qxl_head *head;
@@ -310,7 +310,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
                                            const char *reason)
 {
        struct drm_device *dev = crtc->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
        struct qxl_head head;
        int oldcount, i = qcrtc->index;
@@ -400,7 +400,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
                                         unsigned int num_clips)
 {
        /* TODO: vmwgfx where this was cribbed from had locking. Why? */
-       struct qxl_device *qdev = fb->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(fb->dev);
        struct drm_clip_rect norect;
        struct qxl_bo *qobj;
        bool is_primary;
@@ -462,7 +462,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
 static int qxl_primary_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_bo *bo;
 
        if (!state->crtc || !state->fb)
@@ -476,7 +476,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
 static int qxl_primary_apply_cursor(struct drm_plane *plane)
 {
        struct drm_device *dev = plane->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_framebuffer *fb = plane->state->fb;
        struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
        struct qxl_cursor_cmd *cmd;
@@ -523,7 +523,7 @@ out_free_release:
 static void qxl_primary_atomic_update(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
        struct qxl_bo *primary;
        struct drm_clip_rect norect = {
@@ -554,7 +554,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
 static void qxl_primary_atomic_disable(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
 
        if (old_state->fb) {
                struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -570,7 +570,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_framebuffer *fb = plane->state->fb;
        struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
        struct qxl_release *release;
@@ -679,7 +679,7 @@ out_free_release:
 static void qxl_cursor_atomic_disable(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_release *release;
        struct qxl_cursor_cmd *cmd;
        int ret;
@@ -762,7 +762,7 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
 static int qxl_plane_prepare_fb(struct drm_plane *plane,
                                struct drm_plane_state *new_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct drm_gem_object *obj;
        struct qxl_bo *user_bo;
        struct qxl_surface surf;
@@ -923,7 +923,7 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
 {
        struct qxl_crtc *qxl_crtc;
        struct drm_plane *primary, *cursor;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        int r;
 
        qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
@@ -965,7 +965,7 @@ free_mem:
 static int qxl_conn_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *output = drm_connector_to_qxl_output(connector);
        unsigned int pwidth = 1024;
        unsigned int pheight = 768;
@@ -991,7 +991,7 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
        struct drm_device *ddev = connector->dev;
-       struct qxl_device *qdev = ddev->dev_private;
+       struct qxl_device *qdev = to_qxl(ddev);
 
        if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
                return MODE_BAD;
@@ -1021,7 +1021,7 @@ static enum drm_connector_status qxl_conn_detect(
        struct qxl_output *output =
                drm_connector_to_qxl_output(connector);
        struct drm_device *ddev = connector->dev;
-       struct qxl_device *qdev = ddev->dev_private;
+       struct qxl_device *qdev = to_qxl(ddev);
        bool connected = false;
 
        /* The first monitor is always connected */
@@ -1071,7 +1071,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
 
 static int qdev_output_init(struct drm_device *dev, int num_output)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *qxl_output;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
index 4fda3f9b29f464eb926ee97e2865df12e7affa19..13872b882775d00c2ffdae0547418625efb36d4f 100644 (file)
@@ -81,13 +81,16 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -EINVAL; /* TODO: ENODEV ? */
        }
 
-       qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
-       if (!qdev)
+       qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
+                                 struct qxl_device, ddev);
+       if (IS_ERR(qdev)) {
+               pr_err("Unable to init drm dev");
                return -ENOMEM;
+       }
 
        ret = pci_enable_device(pdev);
        if (ret)
-               goto free_dev;
+               return ret;
 
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
        if (ret)
@@ -101,7 +104,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
-       ret = qxl_device_init(qdev, &qxl_driver, pdev);
+       ret = qxl_device_init(qdev, pdev);
        if (ret)
                goto put_vga;
 
@@ -128,14 +131,13 @@ put_vga:
                vga_put(pdev, VGA_RSRC_LEGACY_IO);
 disable_pci:
        pci_disable_device(pdev);
-free_dev:
-       kfree(qdev);
+
        return ret;
 }
 
 static void qxl_drm_release(struct drm_device *dev)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
 
        /*
         * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
@@ -144,8 +146,6 @@ static void qxl_drm_release(struct drm_device *dev)
         */
        qxl_modeset_fini(qdev);
        qxl_device_fini(qdev);
-       dev->dev_private = NULL;
-       kfree(qdev);
 }
 
 static void
@@ -157,7 +157,6 @@ qxl_pci_remove(struct pci_dev *pdev)
        drm_atomic_helper_shutdown(dev);
        if (is_vga(pdev))
                vga_put(pdev, VGA_RSRC_LEGACY_IO);
-       drm_dev_put(dev);
 }
 
 DEFINE_DRM_GEM_FOPS(qxl_fops);
@@ -165,7 +164,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
 static int qxl_drm_freeze(struct drm_device *dev)
 {
        struct pci_dev *pdev = dev->pdev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        int ret;
 
        ret = drm_mode_config_helper_suspend(dev);
@@ -187,7 +186,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
 
 static int qxl_drm_resume(struct drm_device *dev, bool thaw)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
 
        qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
        if (!thaw) {
@@ -246,7 +245,7 @@ static int qxl_pm_restore(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct qxl_device *qdev = drm_dev->dev_private;
+       struct qxl_device *qdev = to_qxl(drm_dev);
 
        qxl_io_reset(qdev);
        return qxl_drm_resume(drm_dev, false);
index 27e45a2d6b52cdba60cc41dc0c753671e920b823..31e35f787df2c2a7bf23f54de16f7ac60b36a4b5 100644 (file)
@@ -190,13 +190,8 @@ struct qxl_debugfs {
        unsigned int num_files;
 };
 
-int qxl_debugfs_add_files(struct qxl_device *rdev,
-                            struct drm_info_list *files,
-                            unsigned int nfiles);
 int qxl_debugfs_fence_init(struct qxl_device *rdev);
 
-struct qxl_device;
-
 struct qxl_device {
        struct drm_device ddev;
 
@@ -276,11 +271,12 @@ struct qxl_device {
        int monitors_config_height;
 };
 
+#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
+
 extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
-int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
-                   struct pci_dev *pdev);
+int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
 void qxl_device_fini(struct qxl_device *qdev);
 
 int qxl_modeset_init(struct qxl_device *qdev);
@@ -442,8 +438,8 @@ int qxl_garbage_collect(struct qxl_device *qdev);
 
 /* debugfs */
 
-int qxl_debugfs_init(struct drm_minor *minor);
-int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+void qxl_debugfs_init(struct drm_minor *minor);
+void qxl_ttm_debugfs_init(struct qxl_device *qdev);
 
 /* qxl_prime.c */
 int qxl_gem_prime_pin(struct drm_gem_object *obj);
@@ -461,9 +457,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
 int qxl_irq_init(struct qxl_device *qdev);
 irqreturn_t qxl_irq_handler(int irq, void *arg);
 
-int qxl_debugfs_add_files(struct qxl_device *qdev,
-                         struct drm_info_list *files,
-                         unsigned int nfiles);
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+                          struct drm_info_list *files,
+                          unsigned int nfiles);
 
 int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf);
index 272d19b677d8f9a10ed118915d2c65b7dcc15696..24e903383aa17c22231a1b5e326a327f86c896f3 100644 (file)
@@ -32,7 +32,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_bo *qobj;
        uint32_t handle;
        int r;
index 69f37db1027ad7852f7dbfefaf4e33b134c550ac..5ff6fa9b799c5a29122038f9a95106a2fea23a1c 100644 (file)
@@ -34,7 +34,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
        struct qxl_device *qdev;
        struct ttm_buffer_object *tbo;
 
-       qdev = (struct qxl_device *)gobj->dev->dev_private;
+       qdev = to_qxl(gobj->dev);
 
        qxl_surface_evict(qdev, qobj, false);
 
index 8117a45b36102192b3acd09e269faedfb06dc146..d9a583966949dfa5560438dd86f8f90cb61f27d5 100644 (file)
@@ -36,7 +36,7 @@
 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_alloc *qxl_alloc = data;
        int ret;
        struct qxl_bo *qobj;
@@ -64,7 +64,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
 static int qxl_map_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_map *qxl_map = data;
 
        return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
@@ -279,7 +279,7 @@ out_free_reloc:
 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_execbuffer *execbuffer = data;
        struct drm_qxl_command user_cmd;
        int cmd_num;
@@ -304,7 +304,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_update_area *update_area = data;
        struct qxl_rect area = {.left = update_area->left,
                                .top = update_area->top,
@@ -354,7 +354,7 @@ out:
 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_getparam *param = data;
 
        switch (param->param) {
@@ -373,7 +373,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_clientcap *param = data;
        int byte, idx;
 
@@ -394,7 +394,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_alloc_surf *param = data;
        struct qxl_bo *qobj;
        int handle;
index 8435af108632c8a7a4ba04c472072f4a5ac486ca..1ba5a702d7636d862756b9b3cb2705ca16021d40 100644 (file)
@@ -32,7 +32,7 @@
 irqreturn_t qxl_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
-       struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        uint32_t pending;
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
index 70b20ee4741ae0b6f6522207b7b8136561486c24..a6d873052cd409729dfadbeaaf0a104999b72395 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 
 #include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "qxl_drv.h"
@@ -107,20 +108,12 @@ static void qxl_gc_work(struct work_struct *work)
 }
 
 int qxl_device_init(struct qxl_device *qdev,
-                   struct drm_driver *drv,
                    struct pci_dev *pdev)
 {
        int r, sb;
 
-       r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
-       if (r) {
-               pr_err("Unable to init drm dev");
-               goto error;
-       }
-
        qdev->ddev.pdev = pdev;
        pci_set_drvdata(pdev, &qdev->ddev);
-       qdev->ddev.dev_private = qdev;
 
        mutex_init(&qdev->gem.mutex);
        mutex_init(&qdev->update_area_mutex);
@@ -136,8 +129,7 @@ int qxl_device_init(struct qxl_device *qdev,
        qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
        if (!qdev->vram_mapping) {
                pr_err("Unable to create vram_mapping");
-               r = -ENOMEM;
-               goto error;
+               return -ENOMEM;
        }
 
        if (pci_resource_len(pdev, 4) > 0) {
@@ -218,7 +210,7 @@ int qxl_device_init(struct qxl_device *qdev,
                                &(qdev->ram_header->cursor_ring_hdr),
                                sizeof(struct qxl_command),
                                QXL_CURSOR_RING_SIZE,
-                               qdev->io_base + QXL_IO_NOTIFY_CMD,
+                               qdev->io_base + QXL_IO_NOTIFY_CURSOR,
                                false,
                                &qdev->cursor_event);
 
@@ -291,7 +283,6 @@ surface_mapping_free:
        io_mapping_free(qdev->surface_mapping);
 vram_mapping_free:
        io_mapping_free(qdev->vram_mapping);
-error:
        return r;
 }
 
index ab72dc3476e9cfc6d0b486376a89dcdbfa01ddd6..edc8a9916872c76444022026f2967f5b34f17b1d 100644 (file)
@@ -33,7 +33,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        struct qxl_device *qdev;
 
        bo = to_qxl_bo(tbo);
-       qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+       qdev = to_qxl(bo->tbo.base.dev);
 
        qxl_surface_evict(qdev, bo, false);
        WARN_ON_ONCE(bo->map_count > 0);
index 2feca734c7b195fe385dea55417ad9c139c6655d..4fae3e393da14994723a458e78d6d9d4510e5211 100644 (file)
@@ -243,7 +243,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
                return ret;
 
        /* allocate a surface for reserved + validated buffers */
-       ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+       ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
        if (ret)
                return ret;
        return 0;
index 62a5e424971bef7345d665cf666790a2d453c47f..f09a712b1ed2fa2489a4210a4903467b23f4335a 100644 (file)
@@ -243,7 +243,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
        if (!qxl_ttm_bo_is_qxl_bo(bo))
                return;
        qbo = to_qxl_bo(bo);
-       qdev = qbo->tbo.base.dev->dev_private;
+       qdev = to_qxl(qbo->tbo.base.dev);
 
        if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 }
 #endif
 
-int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
@@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
                        qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
        }
-       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
-#else
-       return 0;
+       qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
 #endif
 }
index 9b4072f97215edca4630cb16ed68fb6a641b96ee..3e76ae5a17eea3c79f424fb05cac20ab4d98bfe3 100644 (file)
  */
 
 #include <linux/export.h>
+#include <linux/pci.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
+#include <drm/drm_legacy.h>
 #include <drm/drm_print.h>
 
 #include "ati_pcigart.h"
index c693b2ca03298ff0031c3bc3ce8bf7aeb86b457a..11c97edde54ddade659646b82ac466e39226b37b 100644 (file)
@@ -3,42 +3,13 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-ccflags-y := -Idrivers/gpu/drm/amd/include
-
 hostprogs := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
 
-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG   $@
       cmd_mkregtable = $(obj)/mkregtable $< > $@
 
-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
        $(call if_changed,mkregtable)
 
 $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
index 2c27627b66593cbd0647dd2bdcab325b7f052077..f15b20da5315c848c88714c3ccc1174d2f9dd5d0 100644 (file)
@@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
        SDEBUG("<<\n");
 
 free:
-       if (ws)
-               kfree(ectx.ws);
+       kfree(ectx.ws);
        return ret;
 }
 
index a9257bed348497be3bd93de1577897985e6d570e..134aa2b01f9071d14f3658019d74aebaed0285e9 100644 (file)
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
-       { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
-       { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
 };
 
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
-       1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
-       { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
-       { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
 static const struct ci_pt_config_reg didt_config_ci[] =
 {
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
index 848ef68d90861bbc115b7a679ede8162baff6d06..5d25917251892bb09ae3879f464ef32a5fa19e49 100644 (file)
@@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                                                                    ucOverdriveThermalController];
                        info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
                        strlcpy(info.type, name, sizeof(info.type));
-                       i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                       i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                }
        }
        num_modes = power_info->info.ucNumOfPowerModeEntries;
@@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
index c3e49c973812a1ec8fc7af297eaa6ca472b3fa70..d3c04df7e75d743eac15eb956ab1ff8febf76a39 100644 (file)
@@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                                const char *name = thermal_controller_names[thermal_controller];
                                info.addr = i2c_addr >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                        }
                }
        } else {
@@ -2721,7 +2721,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                                const char *name = "f75375";
                                info.addr = 0x28;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                                DRM_INFO("Possible %s thermal controller at 0x%02x\n",
                                         name, info.addr);
                        }
index 59f8186a24151ee79240b4f14cc7276fda535ba5..bbb0883e8ce6a3184421c3ddcb2fca106a1e4352 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/mmu_notifier.h>
+#include <linux/pci.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc_helper.h>
@@ -44,7 +45,6 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
index 58176db85952cb990f3af9f511c91c007e3a7687..95006cbf42c313b8295c596038fa6fc96bd57075 100644 (file)
@@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
        unsigned long irqflags;
        int r;
 
-       if (pipe < 0 || pipe >= rdev->num_crtc) {
+       if (pipe >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }
@@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
        struct radeon_device *rdev = dev->dev_private;
        unsigned long irqflags;
 
-       if (pipe < 0 || pipe >= rdev->num_crtc) {
+       if (pipe >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return;
        }
index 2cb85dbe728f0ecbd764d9115e90cbcf7ae802ec..a167e1c36d2432719e1f3032d8e97577a074bc66 100644 (file)
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
        false
 };
 
-static const struct si_dte_data dte_data_tahiti_le =
-{
-       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
-       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
-       0x5,
-       0xAFC8,
-       0x64,
-       0x32,
-       1,
-       0,
-       0x10,
-       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
-       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
-       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
-       85,
-       true
-};
-
 static const struct si_dte_data dte_data_tahiti_pro =
 {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
index 654e2dd081460d3382592131602c98ecc893586b..3e67cf70f040215e1ed0245777091efeeaa4ee2a 100644 (file)
@@ -530,7 +530,6 @@ static int rcar_du_remove(struct platform_device *pdev)
        drm_dev_unregister(ddev);
 
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
 
        drm_dev_put(ddev);
 
index c07c6a88aff0224ffe9a34669f13c390b622f544..b0335da0c1614609846bbc78594e00970518e82a 100644 (file)
@@ -13,6 +13,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
  * Encoder
  */
 
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
 {
        struct device_node *ports;
@@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                }
        }
 
-       ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(rcdu->ddev, encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret < 0)
                goto done;
 
-       drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
        /*
         * Attach the bridge to the encoder. The bridge will create the
         * connector.
index fcfd916227d131a9b41ca157a91994ea3b6374ef..482329102f1925cd184bd5fbf29109c870c342a7 100644 (file)
@@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
        unsigned int i;
        int ret;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
 
        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
index c6430027169fc8a0daa6973876bfe7c52dab6376..a0021fc25b27c12054b5fd29bfbcc9e9a8f9b164 100644 (file)
@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 
                drm_plane_create_alpha_property(&plane->plane);
 
-               if (type == DRM_PLANE_TYPE_PRIMARY)
-                       continue;
-
-               drm_object_attach_property(&plane->plane.base,
-                                          rcdu->props.colorkey,
-                                          RCAR_DU_COLORKEY_NONE);
-               drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+               if (type == DRM_PLANE_TYPE_PRIMARY) {
+                       drm_plane_create_zpos_immutable_property(&plane->plane,
+                                                                0);
+               } else {
+                       drm_object_attach_property(&plane->plane.base,
+                                                  rcdu->props.colorkey,
+                                                  RCAR_DU_COLORKEY_NONE);
+                       drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+               }
        }
 
        return 0;
index 5e4faf258c31a6b9042a4c47958449469348da3b..f1a81c9b184d4c427b2de1df4ac588e966c92142 100644 (file)
@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
                drm_plane_helper_add(&plane->plane,
                                     &rcar_du_vsp_plane_helper_funcs);
 
-               if (type == DRM_PLANE_TYPE_PRIMARY)
-                       continue;
-
-               drm_plane_create_alpha_property(&plane->plane);
-               drm_plane_create_zpos_property(&plane->plane, 1, 1,
-                                              vsp->num_planes - 1);
+               if (type == DRM_PLANE_TYPE_PRIMARY) {
+                       drm_plane_create_zpos_immutable_property(&plane->plane,
+                                                                0);
+               } else {
+                       drm_plane_create_alpha_property(&plane->plane);
+                       drm_plane_create_zpos_property(&plane->plane, 1, 1,
+                                                      vsp->num_planes - 1);
+               }
        }
 
        return 0;
index ce98c08aa8b446da487481ebf51a4f996a4efd77..ade2327a10e2c3d340bf91846f25f4748c34c70e 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
        .atomic_check = rockchip_dp_drm_encoder_atomic_check,
 };
 
-static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
 {
        struct device *dev = dp->dev;
@@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
                                                             dev->of_node);
        DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                DRM_ERROR("failed to initialize encoder with drm\n");
                return ret;
index eed594bd38d35f3cb3d9fa93740cf032d16147d4..c634b95b50f7518040abe6ecf888812dc35c72af 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "cdn-dp-core.h"
 #include "cdn-dp-reg.h"
@@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
        .atomic_check = cdn_dp_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
 {
        struct device *dev = dp->dev;
@@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
                                                             dev->of_node);
        DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
 
-       ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                DRM_ERROR("failed to initialize encoder with drm\n");
                return ret;
@@ -1109,7 +1106,7 @@ static const struct component_ops cdn_dp_component_ops = {
        .unbind = cdn_dp_unbind,
 };
 
-int cdn_dp_suspend(struct device *dev)
+static int cdn_dp_suspend(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
        int ret = 0;
@@ -1123,7 +1120,7 @@ int cdn_dp_suspend(struct device *dev)
        return ret;
 }
 
-int cdn_dp_resume(struct device *dev)
+static int cdn_dp_resume(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
index 7361c07cb4a78bd72b72e49bb8a27cbb452cdbf6..9d2163ef4d6e21e875356c346b9bd5ad3a2fb6d7 100644 (file)
@@ -601,7 +601,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
        case YCBCR_4_2_0:
                val[0] = 5;
                break;
-       };
+       }
 
        switch (video->color_depth) {
        case 6:
@@ -619,7 +619,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
        case 16:
                val[1] = 4;
                break;
-       };
+       }
 
        msa_misc = 2 * val[0] + 32 * val[1] +
                   ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
@@ -700,7 +700,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
        case 16:
                val = BCS_16;
                break;
-       };
+       }
 
        val += video->color_fmt << 8;
        ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
index 6e1270e45f974d444df8a7b9d2af176523d206c2..3feff0c45b3f745f5c4aa6d52dcf9d87a6c6ed03 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/bridge/dw_mipi_dsi.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = {
        .disable = dw_mipi_dsi_encoder_disable,
 };
 
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
                                           struct drm_device *drm_dev)
 {
@@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
                                                             dsi->dev->of_node);
 
-       ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("Failed to initialize encoder with drm\n");
                return ret;
index 7f56d8c3491daccda86c3156b60dde8d4a43ee34..121aa8a63a7611c04441cf06a5f338a9a820ae99 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
        return (valid) ? MODE_OK : MODE_BAD;
 }
 
-static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
 {
 }
@@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
        }
 
        drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        platform_set_drvdata(pdev, hdmi);
 
index e5864e8230205f9a014d80bc8d235a5ca89f4794..7afdc54eb3ec1246482f44045c777c04a6fb5385 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
        .atomic_check = inno_hdmi_encoder_atomic_check,
 };
 
-static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_connector_status
 inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
                return -EPROBE_DEFER;
 
        drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
index fe203d38664eb5f2dea49d5dc1fc3078a321cf61..1c546c3a8998473a731da73871acf71be13e6ad4 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <linux/clk.h>
 #include <linux/mfd/syscon.h>
@@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
        .atomic_check = rk3066_hdmi_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_connector_status
 rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
                return -EPROBE_DEFER;
 
        drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
index 20ecb1508a2247992143fda3a8be830da110de17..0f3eb392fe39d5cc1f41d7c8a1dd074af75550ce 100644 (file)
@@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev)
        if (ret)
                goto err_free;
 
-       drm_mode_config_init(drm_dev);
+       ret = drmm_mode_config_init(drm_dev);
+       if (ret)
+               goto err_iommu_cleanup;
 
        rockchip_drm_mode_config_init(drm_dev);
 
        /* Try to bind all sub drivers. */
        ret = component_bind_all(dev, drm_dev);
        if (ret)
-               goto err_mode_config_cleanup;
+               goto err_iommu_cleanup;
 
        ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
        if (ret)
@@ -173,12 +175,9 @@ err_kms_helper_poll_fini:
        rockchip_drm_fbdev_fini(drm_dev);
 err_unbind_all:
        component_unbind_all(dev, drm_dev);
-err_mode_config_cleanup:
-       drm_mode_config_cleanup(drm_dev);
+err_iommu_cleanup:
        rockchip_iommu_cleanup(drm_dev);
 err_free:
-       drm_dev->dev_private = NULL;
-       dev_set_drvdata(dev, NULL);
        drm_dev_put(drm_dev);
        return ret;
 }
@@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev)
 
        drm_atomic_helper_shutdown(drm_dev);
        component_unbind_all(dev, drm_dev);
-       drm_mode_config_cleanup(drm_dev);
        rockchip_iommu_cleanup(drm_dev);
 
-       drm_dev->dev_private = NULL;
-       dev_set_drvdata(dev, NULL);
        drm_dev_put(drm_dev);
 }
 
index c5b06048124e3a8004c7b5ebd654e4f9af2a94e1..e33c2dcd0d4b6fe9a9a071fda0d0de80103407bb 100644 (file)
@@ -30,6 +30,7 @@ struct rockchip_crtc_state {
        int output_mode;
        int output_bpc;
        int output_flags;
+       bool enable_afbc;
 };
 #define to_rockchip_crtc_state(s) \
                container_of(s, struct rockchip_crtc_state, base)
index 221e72e71432105953b3704b76bef77aa6e8ed6e..9b13c784b3475280483cd621f79f2c72d7349479 100644 (file)
@@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
        .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
+static struct drm_framebuffer *
+rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+                  const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_afbc_framebuffer *afbc_fb;
+       const struct drm_format_info *info;
+       int ret;
+
+       info = drm_get_format_info(dev, mode_cmd);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+       if (!afbc_fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+                                        &rockchip_drm_fb_funcs);
+       if (ret) {
+               kfree(afbc_fb);
+               return ERR_PTR(ret);
+       }
+
+       if (drm_is_afbc(mode_cmd->modifier[0])) {
+               int ret, i;
+
+               ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+               if (ret) {
+                       struct drm_gem_object **obj = afbc_fb->base.obj;
+
+                       for (i = 0; i < info->num_planes; ++i)
+                               drm_gem_object_put_unlocked(obj[i]);
+
+                       kfree(afbc_fb);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return &afbc_fb->base;
+}
+
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
-       .fb_create = drm_gem_fb_create_with_dirty,
+       .fb_create = rockchip_fb_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
index cecb2cc781f5493dd7fe4756bd4e0a635d1a3ffa..33463b79a37b3d1318d7cb789992b81833c246ce 100644 (file)
 #define VOP_WIN_TO_INDEX(vop_win) \
        ((vop_win) - (vop_win)->vop->win)
 
+#define VOP_AFBC_SET(vop, name, v) \
+       do { \
+               if ((vop)->data->afbc) \
+                       vop_reg_set((vop), &(vop)->data->afbc->name, \
+                                   0, ~0, v, #name); \
+       } while (0)
+
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
 
+#define AFBC_FMT_RGB565                0x0
+#define AFBC_FMT_U8U8U8U8      0x5
+#define AFBC_FMT_U8U8U8                0x4
+
+#define AFBC_TILE_16x16                BIT(4)
+
 /*
  * The coefficients of the following matrix are all fixed points.
  * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
@@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format)
        }
 }
 
+static int vop_convert_afbc_format(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               return AFBC_FMT_U8U8U8U8;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               return AFBC_FMT_U8U8U8;
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               return AFBC_FMT_RGB565;
+       /* either of the below should not be reachable */
+       default:
+               DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
                                  uint32_t dst, bool is_horizontal,
                                  int vsu_mode, int *vskiplines)
@@ -598,6 +634,17 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
                        vop_win_disable(vop, vop_win);
                }
        }
+
+       if (vop->data->afbc) {
+               struct rockchip_crtc_state *s;
+               /*
+                * Disable AFBC and forget there was a vop window with AFBC
+                */
+               VOP_AFBC_SET(vop, enable, 0);
+               s = to_rockchip_crtc_state(crtc->state);
+               s->enable_afbc = false;
+       }
+
        spin_unlock(&vop->reg_lock);
 
        vop_cfg_done(vop);
@@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane)
        drm_plane_cleanup(plane);
 }
 
+static inline bool rockchip_afbc(u64 modifier)
+{
+       return modifier == ROCKCHIP_AFBC_MOD;
+}
+
+static bool rockchip_mod_supported(struct drm_plane *plane,
+                                  u32 format, u64 modifier)
+{
+       if (modifier == DRM_FORMAT_MOD_LINEAR)
+               return true;
+
+       if (!rockchip_afbc(modifier)) {
+               DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
+
+               return false;
+       }
+
+       return vop_convert_afbc_format(format) >= 0;
+}
+
 static int vop_plane_atomic_check(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
@@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (rockchip_afbc(fb->modifier)) {
+               struct vop *vop = to_vop(crtc);
+
+               if (!vop->data->afbc) {
+                       DRM_ERROR("vop does not support AFBC\n");
+                       return -EINVAL;
+               }
+
+               ret = vop_convert_afbc_format(fb->format->format);
+               if (ret < 0)
+                       return ret;
+
+               if (state->src.x1 || state->src.y1) {
+                       DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+                       return -EINVAL;
+               }
+
+               if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+                       DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+                                 state->rotation);
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
@@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
+       if (rockchip_afbc(fb->modifier)) {
+               int afbc_format = vop_convert_afbc_format(fb->format->format);
+
+               VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
+               VOP_AFBC_SET(vop, hreg_block_split, 0);
+               VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
+               VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
+               VOP_AFBC_SET(vop, pic_size, act_info);
+       }
+
        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
@@ -1001,6 +1102,7 @@ static const struct drm_plane_funcs vop_plane_funcs = {
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .format_mod_supported = rockchip_mod_supported,
 };
 
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -1310,6 +1412,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *crtc_state)
 {
        struct vop *vop = to_vop(crtc);
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct rockchip_crtc_state *s;
+       int afbc_planes = 0;
 
        if (vop->lut_regs && crtc_state->color_mgmt_changed &&
            crtc_state->gamma_lut) {
@@ -1323,6 +1429,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
                }
        }
 
+       drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+               plane_state =
+                       drm_atomic_get_plane_state(crtc_state->state, plane);
+               if (IS_ERR(plane_state)) {
+                       DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
+                                     plane->name);
+                       return PTR_ERR(plane_state);
+               }
+
+               if (drm_is_afbc(plane_state->fb->modifier))
+                       ++afbc_planes;
+       }
+
+       if (afbc_planes > 1) {
+               DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
+               return -EINVAL;
+       }
+
+       s = to_rockchip_crtc_state(crtc_state);
+       s->enable_afbc = afbc_planes > 0;
+
        return 0;
 }
 
@@ -1333,6 +1460,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vop *vop = to_vop(crtc);
        struct drm_plane *plane;
+       struct rockchip_crtc_state *s;
        int i;
 
        if (WARN_ON(!vop->is_enabled))
@@ -1340,6 +1468,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
 
        spin_lock(&vop->reg_lock);
 
+       /* Enable AFBC if there is some AFBC window, disable otherwise. */
+       s = to_rockchip_crtc_state(crtc->state);
+       VOP_AFBC_SET(vop, enable, s->enable_afbc);
        vop_cfg_done(vop);
 
        spin_unlock(&vop->reg_lock);
@@ -1634,7 +1765,8 @@ static int vop_create_crtc(struct vop *vop)
                                               0, &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
-                                              NULL, win_data->type, NULL);
+                                              win_data->phy->format_modifiers,
+                                              win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
                                      ret);
@@ -1678,7 +1810,8 @@ static int vop_create_crtc(struct vop *vop)
                                               &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
-                                              NULL, win_data->type, NULL);
+                                              win_data->phy->format_modifiers,
+                                              win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
                                      ret);
index cc672620d6e04c9f690c28d4836c7e162c1c7123..d03bdb531ef2c42a95d59cd30dc517d28e2a4f69 100644 (file)
 
 #define NUM_YUV2YUV_COEFFICIENTS 12
 
+#define ROCKCHIP_AFBC_MOD \
+       DRM_FORMAT_MOD_ARM_AFBC( \
+               AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+       )
+
 enum vop_data_format {
        VOP_FMT_ARGB8888 = 0,
        VOP_FMT_RGB888,
@@ -34,6 +39,16 @@ struct vop_reg {
        bool relaxed;
 };
 
+struct vop_afbc {
+       struct vop_reg enable;
+       struct vop_reg win_sel;
+       struct vop_reg format;
+       struct vop_reg hreg_block_split;
+       struct vop_reg pic_size;
+       struct vop_reg hdr_ptr;
+       struct vop_reg rstn;
+};
+
 struct vop_modeset {
        struct vop_reg htotal_pw;
        struct vop_reg hact_st_end;
@@ -134,6 +149,7 @@ struct vop_win_phy {
        const struct vop_scl_regs *scl;
        const uint32_t *data_formats;
        uint32_t nformats;
+       const uint64_t *format_modifiers;
 
        struct vop_reg enable;
        struct vop_reg gate;
@@ -173,6 +189,7 @@ struct vop_data {
        const struct vop_misc *misc;
        const struct vop_modeset *modeset;
        const struct vop_output *output;
+       const struct vop_afbc *afbc;
        const struct vop_win_yuv2yuv_data *win_yuv2yuv;
        const struct vop_win_data *win;
        unsigned int win_size;
index 449a62908d213215e6f83782cd88e91fd54a1a1c..63f967902c2d847922199cdae79cf517dd4d74da 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
        .atomic_check = rockchip_lvds_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rk3288_lvds_probe(struct platform_device *pdev,
                             struct rockchip_lvds *lvds)
 {
@@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
                                                             dev->of_node);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
        if (ret < 0) {
                DRM_DEV_ERROR(drm_dev->dev,
                              "failed to initialize encoder: %d\n", ret);
index 90784781e51596499677e95821c4c1a5e181b92d..9a771af5d0c9546c89f25238e85f28f03a3d13ba 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
        .atomic_check = rockchip_rgb_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
                                       struct drm_crtc *crtc,
                                       struct drm_device *drm_dev)
@@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
        encoder = &rgb->encoder;
        encoder->possible_crtcs = drm_crtc_mask(crtc);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
        if (ret < 0) {
                DRM_DEV_ERROR(drm_dev->dev,
                              "failed to initialize encoder: %d\n", ret);
index 7a9d979c8d5d42e2c94b61a3ceed1e3f3ec5248f..2413deded22c5f58c0c90aab524939b86e2da2c4 100644 (file)
@@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = {
        DRM_FORMAT_NV24,
 };
 
+static const uint64_t format_modifiers_win_full[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_win_full_afbc[] = {
+       ROCKCHIP_AFBC_MOD,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
 static const uint32_t formats_win_lite[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
@@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = {
        DRM_FORMAT_BGR565,
 };
 
+static const uint64_t format_modifiers_win_lite[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
 static const struct vop_scl_regs rk3036_win_scl = {
        .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
        .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = {
        .scl = &rk3036_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
@@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = {
 static const struct vop_win_phy rk3036_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = {
 static const struct vop_win_phy rk3126_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = {
        .scl = &px30_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
@@ -249,6 +269,7 @@ static const struct vop_win_phy px30_win0_data = {
 static const struct vop_win_phy px30_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
        .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
@@ -261,6 +282,7 @@ static const struct vop_win_phy px30_win1_data = {
 static const struct vop_win_phy px30_win2_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
        .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
@@ -316,6 +338,7 @@ static const struct vop_win_phy rk3066_win0_data = {
        .scl = &rk3066_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
@@ -332,6 +355,7 @@ static const struct vop_win_phy rk3066_win1_data = {
        .scl = &rk3066_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
@@ -347,6 +371,7 @@ static const struct vop_win_phy rk3066_win1_data = {
 static const struct vop_win_phy rk3066_win2_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
@@ -426,6 +451,7 @@ static const struct vop_win_phy rk3188_win0_data = {
        .scl = &rk3188_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
        .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
        .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
@@ -440,6 +466,7 @@ static const struct vop_win_phy rk3188_win0_data = {
 static const struct vop_win_phy rk3188_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
@@ -545,6 +572,7 @@ static const struct vop_win_phy rk3288_win01_data = {
        .scl = &rk3288_win_full_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
@@ -563,6 +591,7 @@ static const struct vop_win_phy rk3288_win01_data = {
 static const struct vop_win_phy rk3288_win23_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
        .gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
@@ -677,6 +706,7 @@ static const struct vop_win_phy rk3368_win01_data = {
        .scl = &rk3288_win_full_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
@@ -697,6 +727,7 @@ static const struct vop_win_phy rk3368_win01_data = {
 static const struct vop_win_phy rk3368_win23_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
        .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
        .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
@@ -817,6 +848,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
          .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
        { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
        { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+
+};
+
+static const struct vop_win_phy rk3399_win01_data = {
+       .scl = &rk3288_win_full_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full_afbc,
+       .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
+       .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
+       .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+       .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
+       .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
+       .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
+       .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+       .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+/*
+ * rk3399 vop big windows register layout is same as rk3288, but we
+ * have a separate rk3399 win data array here so that we can advertise
+ * AFBC on the primary plane.
+ */
+static const struct vop_win_data rk3399_vop_win_data[] = {
+       { .base = 0x00, .phy = &rk3399_win01_data,
+         .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x40, .phy = &rk3288_win01_data,
+         .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x00, .phy = &rk3288_win23_data,
+         .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x50, .phy = &rk3288_win23_data,
+         .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_afbc rk3399_vop_afbc = {
+       .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
+       .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
+       .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
+       .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
+       .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
+       .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
+       .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
 };
 
 static const struct vop_data rk3399_vop_big = {
@@ -826,9 +904,10 @@ static const struct vop_data rk3399_vop_big = {
        .common = &rk3288_common,
        .modeset = &rk3288_modeset,
        .output = &rk3399_output,
+       .afbc = &rk3399_vop_afbc,
        .misc = &rk3368_misc,
-       .win = rk3368_vop_win_data,
-       .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+       .win = rk3399_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3399_vop_win_data),
        .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
 };
 
index 75a752d59ef1768e890a1b8306a8e45f0a44a254..03556dbfcafbf2a68ce6d511d029e5c06493d70a 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "shmob_drm_backlight.h"
@@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
        .mode_set = shmob_drm_encoder_mode_set,
 };
 
-static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = shmob_drm_encoder_destroy,
-};
-
 int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
 {
        struct drm_encoder *encoder = &sdev->encoder.encoder;
@@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
 
        encoder->possible_crtcs = 1;
 
-       ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(sdev->ddev, encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                return ret;
 
index b8c0930959c7e84bb0ec9b7d1a96a10bcd338c65..ae9d6b8d3ca8764f2b6c63ffb0d61c51e1b3ced5 100644 (file)
@@ -192,7 +192,6 @@ static int shmob_drm_remove(struct platform_device *pdev)
 
        drm_dev_unregister(ddev);
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
        drm_irq_uninstall(ddev);
        drm_dev_put(ddev);
 
@@ -288,7 +287,6 @@ err_irq_uninstall:
        drm_irq_uninstall(ddev);
 err_modeset_cleanup:
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
 err_free_drm_dev:
        drm_dev_put(ddev);
 
index c51197b6fd8547420c1ba14c151dfc82e5510296..7a866d6ce6bb56461fc0efe8838c59a79f0dd453 100644 (file)
@@ -126,7 +126,11 @@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
 
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
 {
-       drm_mode_config_init(sdev->ddev);
+       int ret;
+
+       ret = drmm_mode_config_init(sdev->ddev);
+       if (ret)
+               return ret;
 
        shmob_drm_crtc_create(sdev);
        shmob_drm_encoder_create(sdev);
index c7652584255d3762387a856c0e807b8f3b80b5fb..319962a2c17bf6090c62550301ab420c8d5f9264 100644 (file)
@@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = {
        },
 };
 
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
-                               struct drm_minor *minor)
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+                                struct drm_minor *minor)
 {
        unsigned int i;
 
@@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo,
        for (i = 0; i < STI_MAX_MIXER; i++)
                if (compo->mixer[i])
                        sti_mixer_debugfs_init(compo->mixer[i], minor);
-
-       return 0;
 }
 
 static int sti_compositor_bind(struct device *dev,
index ac4bb38348102e597613d5f80e02cd4e16e9b825..25bb01bdd013f897faa4ffd151d3522fc362587b 100644 (file)
@@ -79,7 +79,7 @@ struct sti_compositor {
        struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
 };
 
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
-                               struct drm_minor *minor);
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+                                struct drm_minor *minor);
 
 #endif
index 49e6cb8f58367ee198d4a2e9146be0ac4b31b259..6f37c104c46f909f057b10df0a47dc561b868636 100644 (file)
@@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
        struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
 
        if (drm_crtc_index(crtc) == 0)
-               return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+               sti_compositor_debugfs_init(compo, crtc->dev->primary);
 
        return 0;
 }
index ea64c1dcaf634042a41e3bef1162eda03630c521..a9805743102375bb55696c33dc2fa8f317b618f6 100644 (file)
@@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = {
        { "cursor", cursor_dbg_show, 0, NULL },
 };
 
-static int cursor_debugfs_init(struct sti_cursor *cursor,
-                              struct drm_minor *minor)
+static void cursor_debugfs_init(struct sti_cursor *cursor,
+                               struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
                cursor_debugfs_files[i].data = cursor;
 
-       return drm_debugfs_create_files(cursor_debugfs_files,
-                                       ARRAY_SIZE(cursor_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(cursor_debugfs_files,
+                                ARRAY_SIZE(cursor_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
@@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_cursor *cursor = to_sti_cursor(plane);
 
-       return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+       cursor_debugfs_init(cursor, drm_plane->dev->primary);
+
+       return 0;
 }
 
 static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
index 50870d8cbb76739bcee4d9b5ea867fc7b0f2401a..3f9db3e3f39780799197cb0a2abc166dbaf85860 100644 (file)
@@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = {
        {"fps_get", sti_drm_fps_dbg_show, 0},
 };
 
-static int sti_drm_dbg_init(struct drm_minor *minor)
+static void sti_drm_dbg_init(struct drm_minor *minor)
 {
-       int ret;
-
-       ret = drm_debugfs_create_files(sti_drm_dbg_list,
-                                      ARRAY_SIZE(sti_drm_dbg_list),
-                                      minor->debugfs_root, minor);
-       if (ret)
-               goto err;
+       drm_debugfs_create_files(sti_drm_dbg_list,
+                                ARRAY_SIZE(sti_drm_dbg_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
                            minor->dev, &sti_drm_fps_fops);
 
        DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
-       return 0;
-err:
-       DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
-       return ret;
 }
 
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
index 3d04bfca21a06232d6fe5ff54a35a0f4d4c4fab3..de4af7735c469c8620acd803866c2d4cc7451fe2 100644 (file)
@@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = {
        { "dvo", dvo_dbg_show, 0, NULL },
 };
 
-static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
                dvo_debugfs_files[i].data = dvo;
 
-       return drm_debugfs_create_files(dvo_debugfs_files,
-                                       ARRAY_SIZE(dvo_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(dvo_debugfs_files,
+                                ARRAY_SIZE(dvo_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_dvo_disable(struct drm_bridge *bridge)
@@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector)
                = to_sti_dvo_connector(connector);
        struct sti_dvo *dvo = dvo_connector->dvo;
 
-       if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
-               DRM_ERROR("DVO debugfs setup failed\n");
-               return -EINVAL;
-       }
+       dvo_debugfs_init(dvo, dvo->drm_dev->primary);
 
        return 0;
 }
index 11595c748844f6f0a088f698b4c4cf0011142bf7..2d5a2b5b78b8e5dd5f202dbb009b09c1615e8ef4 100644 (file)
@@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
        for (i = 0; i < nb_files; i++)
                gdp_debugfs_files[i].data = gdp;
 
-       return drm_debugfs_create_files(gdp_debugfs_files,
-                                       nb_files,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(gdp_debugfs_files,
+                                nb_files,
+                                minor->debugfs_root, minor);
+       return 0;
 }
 
 static int sti_gdp_fourcc2format(int fourcc)
index f3f28d79b0e40ea3a22685351f67022c1e060e36..a1ec891eaf3aaad720b6ddd797b14892753224a4 100644 (file)
@@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = {
        { "hda", hda_dbg_show, 0, NULL },
 };
 
-static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
                hda_debugfs_files[i].data = hda;
 
-       return drm_debugfs_create_files(hda_debugfs_files,
-                                       ARRAY_SIZE(hda_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hda_debugfs_files,
+                                ARRAY_SIZE(hda_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 /**
@@ -643,10 +643,7 @@ static int sti_hda_late_register(struct drm_connector *connector)
                = to_sti_hda_connector(connector);
        struct sti_hda *hda = hda_connector->hda;
 
-       if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
-               DRM_ERROR("HDA debugfs setup failed\n");
-               return -EINVAL;
-       }
+       hda_debugfs_init(hda, hda->drm_dev->primary);
 
        return 0;
 }
index 18eaf786ffa46640060e447adc80e8fb90131c5e..5b15c4974e6b5235fd1e32bc0cc7b46ee492e891 100644 (file)
@@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = {
        { "hdmi", hdmi_dbg_show, 0, NULL },
 };
 
-static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
                hdmi_debugfs_files[i].data = hdmi;
 
-       return drm_debugfs_create_files(hdmi_debugfs_files,
-                                       ARRAY_SIZE(hdmi_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hdmi_debugfs_files,
+                                ARRAY_SIZE(hdmi_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_hdmi_disable(struct drm_bridge *bridge)
@@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
                = to_sti_hdmi_connector(connector);
        struct sti_hdmi *hdmi = hdmi_connector->hdmi;
 
-       if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
-               DRM_ERROR("HDMI debugfs setup failed\n");
-               return -EINVAL;
-       }
+       hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
 
        return 0;
 }
index 1015abe0ce086f8bf489ba76d7e203d402890e49..5a4e12194a77dbef4c2958d2e55fd261546ed2c1 100644 (file)
@@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = {
        { "hqvdp", hqvdp_dbg_show, 0, NULL },
 };
 
-static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
                hqvdp_debugfs_files[i].data = hqvdp;
 
-       return drm_debugfs_create_files(hqvdp_debugfs_files,
-                                       ARRAY_SIZE(hqvdp_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hqvdp_debugfs_files,
+                                ARRAY_SIZE(hqvdp_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 /**
@@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
 
-       return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+       hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+
+       return 0;
 }
 
 static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
index c3a3e1e5fc8ab8317df27fbb5bcf4ea110a55098..7e5f14646625b437a28b94c4a173ed7dc9ee9d23 100644 (file)
@@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
        { "mixer_aux", mixer_dbg_show, 0, NULL },
 };
 
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
 {
        unsigned int i;
        struct drm_info_list *mixer_debugfs_files;
@@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
                nb_files = ARRAY_SIZE(mixer1_debugfs_files);
                break;
        default:
-               return -EINVAL;
+               return;
        }
 
        for (i = 0; i < nb_files; i++)
                mixer_debugfs_files[i].data = mixer;
 
-       return drm_debugfs_create_files(mixer_debugfs_files,
-                                       nb_files,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(mixer_debugfs_files,
+                                nb_files,
+                                minor->debugfs_root, minor);
 }
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
index d9544246913a107da24f5abff1c0fb9e3a1409e4..ab06beb7b25843b6df6ea333ad3220bb5d5c0213 100644 (file)
@@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
 
 /* depth in Cross-bar control = z order */
 #define GAM_MIXER_NB_DEPTH_LEVEL 6
index c36a8da373cb5d81b0269d31a1295f07b2b83bbb..df3817f0fd30231d3a4d3789e36e297775b7a025 100644 (file)
@@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = {
        { "tvout", tvout_dbg_show, 0, NULL },
 };
 
-static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
                tvout_debugfs_files[i].data = tvout;
 
-       return drm_debugfs_create_files(tvout_debugfs_files,
-                                       ARRAY_SIZE(tvout_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tvout_debugfs_files,
+                                ARRAY_SIZE(tvout_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
 static int sti_tvout_late_register(struct drm_encoder *encoder)
 {
        struct sti_tvout *tvout = to_sti_tvout(encoder);
-       int ret;
 
        if (tvout->debugfs_registered)
                return 0;
 
-       ret = tvout_debugfs_init(tvout, encoder->dev->primary);
-       if (ret)
-               return ret;
+       tvout_debugfs_init(tvout, encoder->dev->primary);
 
        tvout->debugfs_registered = true;
        return 0;
index 2d4230410464471fe615f29b4155976c3222d1f7..2d818397918db832ccacd76dab86fdd54563a737 100644 (file)
@@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = {
        { "vid", vid_dbg_show, 0, NULL },
 };
 
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
                vid_debugfs_files[i].data = vid;
 
-       return drm_debugfs_create_files(vid_debugfs_files,
-                                       ARRAY_SIZE(vid_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(vid_debugfs_files,
+                                ARRAY_SIZE(vid_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 void sti_vid_commit(struct sti_vid *vid,
index 9dbd78461de12b419e4e4d7ae00b54c8be4d12d6..991849ba50b58845e181c071461543d8a17a1800 100644 (file)
@@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
 struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
                               int id, void __iomem *baseaddr);
 
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
 
 #endif
index ea9fcbdc68b397787c84edb609a21a007be2e093..0f85dd86cafa73f24f1ec33e3ed8fb36a764eabc 100644 (file)
@@ -88,7 +88,9 @@ static int drv_load(struct drm_device *ddev)
 
        ddev->dev_private = (void *)ldev;
 
-       drm_mode_config_init(ddev);
+       ret = drmm_mode_config_init(ddev);
+       if (ret)
+               return ret;
 
        /*
         * set max width and height as default value.
@@ -103,7 +105,7 @@ static int drv_load(struct drm_device *ddev)
 
        ret = ltdc_load(ddev);
        if (ret)
-               goto err;
+               return ret;
 
        drm_mode_config_reset(ddev);
        drm_kms_helper_poll_init(ddev);
@@ -111,9 +113,6 @@ static int drv_load(struct drm_device *ddev)
        platform_set_drvdata(pdev, ddev);
 
        return 0;
-err:
-       drm_mode_config_cleanup(ddev);
-       return ret;
 }
 
 static void drv_unload(struct drm_device *ddev)
@@ -122,7 +121,6 @@ static void drv_unload(struct drm_device *ddev)
 
        drm_kms_helper_poll_fini(ddev);
        ltdc_unload(ddev);
-       drm_mode_config_cleanup(ddev);
 }
 
 static __maybe_unused int drv_suspend(struct device *dev)
index df585fe64f614913f593db359661b1cbccf8dc83..f894968d6e452a7ca10d97f78aea0da00519abaa 100644 (file)
@@ -42,8 +42,6 @@
 
 #define MAX_IRQ 4
 
-#define MAX_ENDPOINTS 2
-
 #define HWVER_10200 0x010200
 #define HWVER_10300 0x010300
 #define HWVER_20101 0x020101
@@ -1201,36 +1199,20 @@ int ltdc_load(struct drm_device *ddev)
        struct ltdc_device *ldev = ddev->dev_private;
        struct device *dev = ddev->dev;
        struct device_node *np = dev->of_node;
-       struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
-       struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
+       struct drm_bridge *bridge;
+       struct drm_panel *panel;
        struct drm_crtc *crtc;
        struct reset_control *rstc;
        struct resource *res;
-       int irq, ret, i, endpoint_not_ready = -ENODEV;
+       int irq, i, nb_endpoints;
+       int ret = -ENODEV;
 
        DRM_DEBUG_DRIVER("\n");
 
-       /* Get endpoints if any */
-       for (i = 0; i < MAX_ENDPOINTS; i++) {
-               ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
-                                                 &bridge[i]);
-
-               /*
-                * If at least one endpoint is -EPROBE_DEFER, defer probing,
-                * else if at least one endpoint is ready, continue probing.
-                */
-               if (ret == -EPROBE_DEFER)
-                       return ret;
-               else if (!ret)
-                       endpoint_not_ready = 0;
-       }
-
-       if (endpoint_not_ready)
-               return endpoint_not_ready;
-
-       rstc = devm_reset_control_get_exclusive(dev, NULL);
-
-       mutex_init(&ldev->err_lock);
+       /* Get number of endpoints */
+       nb_endpoints = of_graph_get_endpoint_count(np);
+       if (!nb_endpoints)
+               return -ENODEV;
 
        ldev->pixel_clk = devm_clk_get(dev, "lcd");
        if (IS_ERR(ldev->pixel_clk)) {
@@ -1244,6 +1226,43 @@ int ltdc_load(struct drm_device *ddev)
                return -ENODEV;
        }
 
+       /* Get endpoints if any */
+       for (i = 0; i < nb_endpoints; i++) {
+               ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
+
+               /*
+                * If at least one endpoint is -ENODEV, continue probing,
+                * else if at least one endpoint returned an error
+                * (ie -EPROBE_DEFER) then stop probing.
+                */
+               if (ret == -ENODEV)
+                       continue;
+               else if (ret)
+                       goto err;
+
+               if (panel) {
+                       bridge = drm_panel_bridge_add_typed(panel,
+                                                           DRM_MODE_CONNECTOR_DPI);
+                       if (IS_ERR(bridge)) {
+                               DRM_ERROR("panel-bridge endpoint %d\n", i);
+                               ret = PTR_ERR(bridge);
+                               goto err;
+                       }
+               }
+
+               if (bridge) {
+                       ret = ltdc_encoder_init(ddev, bridge);
+                       if (ret) {
+                               DRM_ERROR("init encoder endpoint %d\n", i);
+                               goto err;
+                       }
+               }
+       }
+
+       rstc = devm_reset_control_get_exclusive(dev, NULL);
+
+       mutex_init(&ldev->err_lock);
+
        if (!IS_ERR(rstc)) {
                reset_control_assert(rstc);
                usleep_range(10, 20);
@@ -1285,27 +1304,7 @@ int ltdc_load(struct drm_device *ddev)
                        DRM_ERROR("Failed to register LTDC interrupt\n");
                        goto err;
                }
-       }
 
-       /* Add endpoints panels or bridges if any */
-       for (i = 0; i < MAX_ENDPOINTS; i++) {
-               if (panel[i]) {
-                       bridge[i] = drm_panel_bridge_add_typed(panel[i],
-                                                              DRM_MODE_CONNECTOR_DPI);
-                       if (IS_ERR(bridge[i])) {
-                               DRM_ERROR("panel-bridge endpoint %d\n", i);
-                               ret = PTR_ERR(bridge[i]);
-                               goto err;
-                       }
-               }
-
-               if (bridge[i]) {
-                       ret = ltdc_encoder_init(ddev, bridge[i]);
-                       if (ret) {
-                               DRM_ERROR("init encoder endpoint %d\n", i);
-                               goto err;
-                       }
-               }
        }
 
        crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1340,8 +1339,8 @@ int ltdc_load(struct drm_device *ddev)
 
        return 0;
 err:
-       for (i = 0; i < MAX_ENDPOINTS; i++)
-               drm_panel_bridge_remove(bridge[i]);
+       for (i = 0; i < nb_endpoints; i++)
+               drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
 
        clk_disable_unprepare(ldev->pixel_clk);
 
@@ -1350,11 +1349,14 @@ err:
 
 void ltdc_unload(struct drm_device *ddev)
 {
-       int i;
+       struct device *dev = ddev->dev;
+       int nb_endpoints, i;
 
        DRM_DEBUG_DRIVER("\n");
 
-       for (i = 0; i < MAX_ENDPOINTS; i++)
+       nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+
+       for (i = 0; i < nb_endpoints; i++)
                drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
 
        pm_runtime_disable(ddev->dev);
index 68d4644ac2dcc32a2d4118d824f13eba5d284921..e324d7db7b7db6a698a3fdf6b338f3263ae59e90 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_backend.h"
 #include "sun4i_crtc.h"
@@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
        .mode_valid     = sun4i_hdmi_mode_valid,
 };
 
-static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static int sun4i_hdmi_get_modes(struct drm_connector *connector)
 {
        struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&hdmi->encoder,
                               &sun4i_hdmi_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &hdmi->encoder,
-                              &sun4i_hdmi_funcs,
-                              DRM_MODE_ENCODER_TMDS,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &hdmi->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                dev_err(dev, "Couldn't initialise the HDMI encoder\n");
                goto err_put_ddc_i2c;
index 26e5c7ceb8ffd25110b4831a715ee71fa6ea29c6..ffda3184aa12ab2dccfec07214695207f57b2380 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
        .enable         = sun4i_lvds_encoder_enable,
 };
 
-static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 {
        struct drm_encoder *encoder;
@@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 
        drm_encoder_helper_add(&lvds->encoder,
                               &sun4i_lvds_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &lvds->encoder,
-                              &sun4i_lvds_enc_funcs,
-                              DRM_MODE_ENCODER_LVDS,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &lvds->encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
                goto err_out;
index 3b23d5be3cf3b6734d0ceb2180ad0d358dcb9fca..5a7d43939ae67819742ceec29e189084749f3a3a 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
        .mode_valid     = sun4i_rgb_mode_valid,
 };
 
-static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
-       .destroy        = sun4i_rgb_enc_destroy,
-};
-
 int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 {
        struct drm_encoder *encoder;
@@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 
        drm_encoder_helper_add(&rgb->encoder,
                               &sun4i_rgb_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &rgb->encoder,
-                              &sun4i_rgb_enc_funcs,
-                              DRM_MODE_ENCODER_NONE,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &rgb->encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
                goto err_out;
index 624437b27cdc48db078a54ae3a18b87633d8e373..359b56e43b83c564e48e062c2f12a1524f20485f 100644 (file)
@@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev,
        int irq, ret;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
                               dev_name(dev), tcon);
index 39c15282e4489ef4df03ae406a7bcf1cc8472849..63f4428ac3bf786e31f85db704921360276d6b57 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
@@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
        .mode_set       = sun4i_tv_mode_set,
 };
 
-static void sun4i_tv_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_tv_funcs = {
-       .destroy        = sun4i_tv_destroy,
-};
-
 static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
 {
        int i;
@@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&tv->encoder,
                               &sun4i_tv_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &tv->encoder,
-                              &sun4i_tv_funcs,
-                              DRM_MODE_ENCODER_TVDAC,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &tv->encoder,
+                                     DRM_MODE_ENCODER_TVDAC);
        if (ret) {
                dev_err(dev, "Couldn't initialise the TV encoder\n");
                goto err_disable_clk;
index 059939789730d893d089634e37daadb866c60387..f6c67dd87a054d0deafc1ea3a2a57692d2df748e 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
        .enable         = sun6i_dsi_encoder_enable,
 };
 
-static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
                                       const struct mipi_dsi_msg *msg)
 {
@@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&dsi->encoder,
                               &sun6i_dsi_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &dsi->encoder,
-                              &sun6i_dsi_enc_funcs,
-                              DRM_MODE_ENCODER_DSI,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
        if (ret) {
                dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
                return ret;
index e8a317d5ba194442134ef57f967ab5bf8dfd3549..972682bb8000919df20765945d09adf3e61b2b2b 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun8i_dw_hdmi.h"
 #include "sun8i_tcon_top.h"
@@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = {
        .mode_set = sun8i_dw_hdmi_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
                              const struct drm_display_mode *mode)
@@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
        }
 
        drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        sun8i_hdmi_phy_init(hdmi->phy);
 
index 4a64f7ae437a8e13dc9510ae4e1264a87c4d9295..56cc037fd31288e1fb2364e3b1ff001106eec2c4 100644 (file)
 #include "sun8i_vi_layer.h"
 #include "sunxi_engine.h"
 
+struct de2_fmt_info {
+       u32     drm_fmt;
+       u32     de2_fmt;
+};
+
 static const struct de2_fmt_info de2_formats[] = {
        {
                .drm_fmt = DRM_FORMAT_ARGB8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_XRGB8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_XBGR8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBX8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRX8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGB888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGR888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGB565,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGR565,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XRGB4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XBGR4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_RGBX4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_BGRX4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XRGB1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XBGR1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_RGBX5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_BGRX5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB2101010,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR2101010,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA1010102,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA1010102,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_UYVY,
                .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_VYUY,
                .de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUYV,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVYU,
                .de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV16,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV16,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV61,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV61,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV12,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV12,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV21,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV21,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV422,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV420,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV411,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU422,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU420,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU411,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_P010,
                .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_P210,
                .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
 };
 
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
-               if (de2_formats[i].drm_fmt == format)
-                       return &de2_formats[i];
+               if (de2_formats[i].drm_fmt == format) {
+                       *hw_format = de2_formats[i].de2_fmt;
+                       return 0;
+               }
 
-       return NULL;
+       return -EINVAL;
 }
 
 static void sun8i_mixer_commit(struct sunxi_engine *engine)
index 345b28b0a80a1b8abe27870831d74b3e25f61256..7576b523fdbb1409c720ca5874425a71be9cc411 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
-#include "sun8i_csc.h"
 #include "sunxi_engine.h"
 
 #define SUN8I_MIXER_SIZE(w, h)                 (((h) - 1) << 16 | ((w) - 1))
 #define SUN50I_MIXER_CDC0_EN                   0xd0000
 #define SUN50I_MIXER_CDC1_EN                   0xd8000
 
-struct de2_fmt_info {
-       u32                     drm_fmt;
-       u32                     de2_fmt;
-       bool                    rgb;
-       enum sun8i_csc_mode     csc;
-};
-
 /**
  * struct sun8i_mixer_cfg - mixer HW configuration
  * @vi_num: number of VI channels
@@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
                return DE2_CH_BASE + channel * DE2_CH_SIZE;
 }
 
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
 #endif /* _SUN8I_MIXER_H_ */
index c87fd842918e58c10e74ecf2142e18781a34fb8f..54f937a7d5e7aa5d58faaf257afac094f0353f2e 100644 (file)
@@ -19,8 +19,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
-#include "sun8i_ui_layer.h"
 #include "sun8i_mixer.h"
+#include "sun8i_ui_layer.h"
 #include "sun8i_ui_scaler.h"
 
 static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                                         int overlay, struct drm_plane *plane)
 {
        struct drm_plane_state *state = plane->state;
-       const struct de2_fmt_info *fmt_info;
-       u32 val, ch_base;
+       const struct drm_format_info *fmt;
+       u32 val, ch_base, hw_fmt;
+       int ret;
 
        ch_base = sun8i_channel_base(mixer, channel);
 
-       fmt_info = sun8i_mixer_format_info(state->fb->format->format);
-       if (!fmt_info || !fmt_info->rgb) {
+       fmt = state->fb->format;
+       ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+       if (ret || fmt->is_yuv) {
                DRM_DEBUG_DRIVER("Invalid format\n");
                return -EINVAL;
        }
 
-       val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+       val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
        regmap_update_bits(mixer->engine.regs,
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
index b8398ca18b0fd20027f136de12048cb7a094707d..22c8c5375d0db8d0c53f96a6c56092914497cac4 100644 (file)
@@ -12,8 +12,9 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
-#include "sun8i_vi_layer.h"
+#include "sun8i_csc.h"
 #include "sun8i_mixer.h"
+#include "sun8i_vi_layer.h"
 #include "sun8i_vi_scaler.h"
 
 static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
        return 0;
 }
 
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+{
+       if (!format->is_yuv)
+               return SUN8I_CSC_MODE_OFF;
+
+       switch (format->format) {
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YVU444:
+               return SUN8I_CSC_MODE_YVU2RGB;
+       default:
+               return SUN8I_CSC_MODE_YUV2RGB;
+       }
+}
+
 static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                                         int overlay, struct drm_plane *plane)
 {
        struct drm_plane_state *state = plane->state;
-       const struct de2_fmt_info *fmt_info;
-       u32 val, ch_base;
+       u32 val, ch_base, csc_mode, hw_fmt;
+       const struct drm_format_info *fmt;
+       int ret;
 
        ch_base = sun8i_channel_base(mixer, channel);
 
-       fmt_info = sun8i_mixer_format_info(state->fb->format->format);
-       if (!fmt_info) {
+       fmt = state->fb->format;
+       ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+       if (ret) {
                DRM_DEBUG_DRIVER("Invalid format\n");
-               return -EINVAL;
+               return ret;
        }
 
-       val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+       val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
        regmap_update_bits(mixer->engine.regs,
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
 
-       if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
-               sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+       csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
+       if (csc_mode != SUN8I_CSC_MODE_OFF) {
+               sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
                                                state->color_encoding,
                                                state->color_range);
                sun8i_csc_enable_ccsc(mixer, channel, true);
@@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                sun8i_csc_enable_ccsc(mixer, channel, false);
        }
 
-       if (fmt_info->rgb)
+       if (!fmt->is_yuv)
                val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
        else
                val = 0;
index 1a7b08f35776826042ee1337d9df350d2d7d275f..83f31c6e891c6403807d50f430da5d2e7ce9a1df 100644 (file)
@@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
        struct drm_minor *minor = crtc->dev->primary;
        struct dentry *root;
        struct tegra_dc *dc = to_tegra_dc(crtc);
-       int err;
 
 #ifdef CONFIG_DEBUG_FS
        root = crtc->debugfs_entry;
@@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
        for (i = 0; i < count; i++)
                dc->debugfs_files[i].data = dc;
 
-       err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(dc->debugfs_files);
-       dc->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_dc_early_unregister(struct drm_crtc *crtc)
index 7dfb50f65067f291544f06f011f5cfa647193f1b..105fb9cdbb3bdc6cd05acc0bbf1083f23234a26a 100644 (file)
@@ -5,12 +5,10 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
index bd268028fb3d625c9c637b642a540d0e008dcfab..d4f51b5c7ee55c2092dd96106e43cbf629ea7e1a 100644 (file)
@@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = {
        { "iova", tegra_debugfs_iova, 0 },
 };
 
-static int tegra_debugfs_init(struct drm_minor *minor)
+static void tegra_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(tegra_debugfs_list,
-                                       ARRAY_SIZE(tegra_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tegra_debugfs_list,
+                                ARRAY_SIZE(tegra_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index ed99b67deb292c15e0deb5b59216bd00c7ceadfb..b25443255be6beb1995c675fc377d0b1152f1968 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/host1x.h>
 #include <linux/iova.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_edid.h>
@@ -152,8 +152,6 @@ enum drm_connector_status
 tegra_output_connector_detect(struct drm_connector *connector, bool force);
 void tegra_output_connector_destroy(struct drm_connector *connector);
 
-void tegra_output_encoder_destroy(struct drm_encoder *encoder);
-
 /* from dpaux.c */
 struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
 enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
index 88b9d64c77bf2ee71528ff65ca857b4e6cc3b652..38beab9ab4f8c0921f2c3840b38a4528bbdf27ce 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dc.h"
 #include "drm.h"
@@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_dsi *dsi = to_dsi(output);
-       int err;
 
        dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                     GFP_KERNEL);
@@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                dsi->debugfs_files[i].data = dsi;
 
-       err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(dsi->debugfs_files);
-       dsi->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_dsi_early_unregister(struct drm_connector *connector)
@@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs
        .mode_valid = tegra_dsi_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
 {
        int err;
@@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client)
                                         &tegra_dsi_connector_helper_funcs);
                dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-               drm_encoder_init(drm, &dsi->output.encoder,
-                                &tegra_dsi_encoder_funcs,
-                                DRM_MODE_ENCODER_DSI, NULL);
+               drm_simple_encoder_init(drm, &dsi->output.encoder,
+                                       DRM_MODE_ENCODER_DSI);
                drm_encoder_helper_add(&dsi->output.encoder,
                                       &tegra_dsi_encoder_helper_funcs);
 
index b8a328f538626e7a2b833a9f4aac60c70ee6077d..2b0666ac681b87214cc0dbcc57b0ee22a5e94dcb 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
  *
  * Based on the KMS/FB CMA helpers
- *   Copyright (C) 2012 Analog Device Inc.
+ *   Copyright (C) 2012 Analog Devices Inc.
  */
 
 #include <linux/console.h>
index 38252c0f068dff704928a34435f06f7374697b5e..d09a24931c87cbb8499490fdfba287401d0fbf04 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/hdmi.h>
 #include <linux/math64.h>
 #include <linux/module.h>
@@ -22,6 +21,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "hda.h"
 #include "hdmi.h"
@@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_hdmi *hdmi = to_hdmi(output);
-       int err;
 
        hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                      GFP_KERNEL);
@@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                hdmi->debugfs_files[i].data = hdmi;
 
-       err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(hdmi->debugfs_files);
-       hdmi->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_hdmi_early_unregister(struct drm_connector *connector)
@@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = {
        .mode_valid = tegra_hdmi_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
        struct tegra_output *output = encoder_to_output(encoder);
@@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client)
                                 &tegra_hdmi_connector_helper_funcs);
        hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, &hdmi->output.encoder,
+                               DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&hdmi->output.encoder,
                               &tegra_hdmi_encoder_helper_funcs);
 
index a264259b97a26b85fb7fd1d203e9ab93a68bd2f5..e36e5e7c2f694dfe627b1c720d959509041735ce 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector)
        drm_connector_cleanup(connector);
 }
 
-void tegra_output_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
 static irqreturn_t hpd_irq(int irq, void *data)
 {
        struct tegra_output *output = data;
index 4be4dfd4a68a3e31bd085900eccd46541459b08b..0562a7eb793f31c6bb0e4dc9c0ae35efe1d2f5f3 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs
        .mode_valid = tegra_rgb_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
        struct tegra_output *output = encoder_to_output(encoder);
@@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
                                 &tegra_rgb_connector_helper_funcs);
        output->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
        drm_encoder_helper_add(&output->encoder,
                               &tegra_rgb_encoder_helper_funcs);
 
index 81226a4953c157c68b91753416235e6f3be933b4..7cbcf9617f5e6b2dcc31fb1e59cab96c49e2c342 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/debugfs.h>
-#include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -23,6 +22,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_scdc_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dc.h"
 #include "dp.h"
@@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_sor *sor = to_sor(output);
-       int err;
 
        sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                     GFP_KERNEL);
@@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                sor->debugfs_files[i].data = sor;
 
-       err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(sor->debugfs_files);
-       sor->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_sor_early_unregister(struct drm_connector *connector)
@@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs
        .mode_valid = tegra_sor_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static int
 tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_crtc_state *crtc_state,
@@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client)
                                 &tegra_sor_connector_helper_funcs);
        sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
-                        encoder, NULL);
+       drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
        drm_encoder_helper_add(&sor->output.encoder, helpers);
 
        drm_connector_attach_encoder(&sor->output.connector,
index d4ce9bab8c7e61369ab52bc6617d3a6fddeb3610..2396262c09e4c46e8ad79d52f463f1a9cb129b8e 100644 (file)
@@ -24,7 +24,7 @@
 static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
 {
        struct drm_device *ddev = tcrtc->crtc.dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct drm_pending_vblank_event *event;
        unsigned long flags;
        bool busy;
@@ -88,7 +88,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *state)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct dispc_device *dispc = tidss->dispc;
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
@@ -165,7 +165,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
 
        dev_dbg(ddev->dev,
@@ -216,7 +216,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        unsigned long flags;
        int r;
@@ -259,7 +259,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
 
        dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
@@ -295,7 +295,7 @@ enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
 }
@@ -314,7 +314,7 @@ static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
 static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
@@ -328,7 +328,7 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
 static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
index 29f42768e294738904d6fd09df3d901c795a002e..629dd06393f68051187a1d3abdb1d1e5081b7763 100644 (file)
@@ -181,10 +181,6 @@ const struct dispc_features dispc_am65x_feats = {
        .vid_name = { "vid", "vidl1" },
        .vid_lite = { false, true, },
        .vid_order = { 1, 0 },
-
-       .errata = {
-               .i2000 = true,
-       },
 };
 
 static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -2674,12 +2670,9 @@ int dispc_init(struct tidss_device *tidss)
                return -ENOMEM;
 
        num_fourccs = 0;
-       for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
-               if (feat->errata.i2000 &&
-                   dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
-                       continue;
+       for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
                dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
-       }
+
        dispc->num_fourccs = num_fourccs;
        dispc->tidss = tidss;
        dispc->dev = dev;
index a4a68249e44b8da17eccd21f08409cea4adb1009..902e612ff7acd9047f228ef88eedcdbb79dfb9e5 100644 (file)
@@ -46,10 +46,6 @@ struct dispc_features_scaling {
        u32 xinc_max;
 };
 
-struct dispc_errata {
-       bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
-};
-
 enum dispc_vp_bus_type {
        DISPC_VP_DPI,           /* DPI output */
        DISPC_VP_OLDI,          /* OLDI (LVDS) output */
@@ -83,8 +79,6 @@ struct dispc_features {
        const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
        bool vid_lite[TIDSS_MAX_PLANES];
        u32 vid_order[TIDSS_MAX_PLANES];
-
-       struct dispc_errata errata;
 };
 
 extern const struct dispc_features dispc_k2g_feats;
index d95e4be2c7b9f7f35b036bf1bcc7e813c0da6410..99edc66ebdef29b0a7e1f7bf2d203861a96bfd76 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "tidss_dispc.h"
@@ -102,15 +103,7 @@ static const struct dev_pm_ops tidss_pm_ops = {
 
 static void tidss_release(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
-
        drm_kms_helper_poll_fini(ddev);
-
-       tidss_modeset_cleanup(tidss);
-
-       drm_dev_fini(ddev);
-
-       kfree(tidss);
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
@@ -142,26 +135,18 @@ static int tidss_probe(struct platform_device *pdev)
 
        dev_dbg(dev, "%s\n", __func__);
 
-       /* Can't use devm_* since drm_device's lifetime may exceed dev's */
-       tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
-       if (!tidss)
-               return -ENOMEM;
+       tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
+                                  struct tidss_device, ddev);
+       if (IS_ERR(tidss))
+               return PTR_ERR(tidss);
 
        ddev = &tidss->ddev;
 
-       ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
-       if (ret) {
-               kfree(ddev);
-               return ret;
-       }
-
        tidss->dev = dev;
        tidss->feat = of_device_get_match_data(dev);
 
        platform_set_drvdata(pdev, tidss);
 
-       ddev->dev_private = tidss;
-
        ret = dispc_init(tidss);
        if (ret) {
                dev_err(dev, "failed to initialize dispc: %d\n", ret);
index e2aa6436ad1833ff073f1a26a9699730a5f77693..3b0a3d87b7c4c2779c57212cbb254f521082fc10 100644 (file)
@@ -29,10 +29,10 @@ struct tidss_device {
 
        spinlock_t wait_lock;   /* protects the irq masks */
        dispc_irq_t irq_mask;   /* enabled irqs in addition to wait_list */
-
-       struct drm_atomic_state *saved_state;
 };
 
+#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
+
 int tidss_runtime_get(struct tidss_device *tidss);
 void tidss_runtime_put(struct tidss_device *tidss);
 
index 83785b0a66a9d28740fed7848b6559bbb99dce0f..4c0558286f5e68c0028b48413bcf657793fea1f2 100644 (file)
@@ -8,8 +8,9 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_panel.h>
 #include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tidss_crtc.h"
 #include "tidss_drv.h"
@@ -59,10 +60,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
        .atomic_check = tidss_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
                                         u32 encoder_type, u32 possible_crtcs)
 {
@@ -75,8 +72,7 @@ struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
 
        enc->possible_crtcs = possible_crtcs;
 
-       ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
-                              encoder_type, NULL);
+       ret = drm_simple_encoder_init(&tidss->ddev, enc, encoder_type);
        if (ret < 0)
                return ERR_PTR(ret);
 
index 612c046738e5f3dc6d86d5b15260a424fca380d4..1b80f2d62e0aeaa098daf4835c793f4a86156fa7 100644 (file)
@@ -23,7 +23,7 @@ static void tidss_irq_update(struct tidss_device *tidss)
 void tidss_irq_enable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
        unsigned long flags;
@@ -38,7 +38,7 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
 void tidss_irq_disable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
        unsigned long flags;
@@ -53,7 +53,7 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
 irqreturn_t tidss_irq_handler(int irq, void *arg)
 {
        struct drm_device *ddev = (struct drm_device *)arg;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned int id;
        dispc_irq_t irqstatus;
 
@@ -95,7 +95,7 @@ void tidss_irq_resume(struct tidss_device *tidss)
 
 void tidss_irq_preinstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        spin_lock_init(&tidss->wait_lock);
 
@@ -109,7 +109,7 @@ void tidss_irq_preinstall(struct drm_device *ddev)
 
 int tidss_irq_postinstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
        unsigned int i;
 
@@ -138,7 +138,7 @@ int tidss_irq_postinstall(struct drm_device *ddev)
 
 void tidss_irq_uninstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        tidss_runtime_get(tidss);
        dispc_set_irqenable(tidss->dispc, 0);
index 7d419960b030934fc39a205c71113478abc38a9a..4b99e9fa84a5bf801920b6ca6863346557c260e3 100644 (file)
@@ -25,7 +25,7 @@
 static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
        struct drm_device *ddev = old_state->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
@@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
 
        dev_dbg(tidss->dev, "%s\n", __func__);
 
-       drm_mode_config_init(ddev);
+       ret = drmm_mode_config_init(ddev);
+       if (ret)
+               return ret;
 
        ddev->mode_config.min_width = 8;
        ddev->mode_config.min_height = 8;
@@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss)
 
        ret = tidss_dispc_modeset_init(tidss);
        if (ret)
-               goto err_mode_config_cleanup;
+               return ret;
 
        ret = drm_vblank_init(ddev, tidss->num_crtcs);
        if (ret)
-               goto err_mode_config_cleanup;
+               return ret;
 
        /* Start with vertical blanking interrupt reporting disabled. */
        for (i = 0; i < tidss->num_crtcs; ++i)
@@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss)
        dev_dbg(tidss->dev, "%s done\n", __func__);
 
        return 0;
-
-err_mode_config_cleanup:
-       drm_mode_config_cleanup(ddev);
-       return ret;
-}
-
-void tidss_modeset_cleanup(struct tidss_device *tidss)
-{
-       struct drm_device *ddev = &tidss->ddev;
-
-       drm_mode_config_cleanup(ddev);
 }
index dda5625d01283d034870c1b13643d03251e08b7f..99aaff099f22980a1c384fab3fcfe01ca097caf4 100644 (file)
@@ -10,6 +10,5 @@
 struct tidss_device;
 
 int tidss_modeset_init(struct tidss_device *tidss);
-void tidss_modeset_cleanup(struct tidss_device *tidss);
 
 #endif
index ff99b2dd4a17f8681ee0c9f7aa6ecfa2b6837803..23bb3e59504b62773905cdcbb299f1427a4e085e 100644 (file)
@@ -22,7 +22,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
        const struct drm_format_info *finfo;
        struct drm_crtc_state *crtc_state;
@@ -101,7 +101,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
        struct drm_plane_state *state = plane->state;
        u32 hw_videoport;
@@ -133,7 +133,7 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
index 0791a0200cc3c5690d234c612ec6e0fcf6f04d2d..a5e9ee4c7fbf4203477e7283c5aea128d2240b7f 100644 (file)
@@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
        ret = drm_dev_register(ddev, 0);
        if (ret)
                goto init_failed;
+       priv->is_registered = true;
 
        drm_fbdev_generic_setup(ddev, bpp);
-
-       priv->is_registered = true;
        return 0;
 
 init_failed:
@@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = {
                { "mm",   tilcdc_mm_show,   0 },
 };
 
-static int tilcdc_debugfs_init(struct drm_minor *minor)
+static void tilcdc_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
        struct tilcdc_module *mod;
-       int ret;
 
-       ret = drm_debugfs_create_files(tilcdc_debugfs_list,
-                       ARRAY_SIZE(tilcdc_debugfs_list),
-                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tilcdc_debugfs_list,
+                                ARRAY_SIZE(tilcdc_debugfs_list),
+                                minor->debugfs_root, minor);
 
        list_for_each_entry(mod, &module_list, list)
                if (mod->funcs->debugfs_init)
                        mod->funcs->debugfs_init(mod, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
 }
 #endif
 
index 28b7f703236e88401acf4783eca50f7ea08ddb20..b177525588c14ac6c96562e4f6b40ac5975e6143 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_external.h"
@@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
        return 0;
 }
 
-static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static
 int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
 {
@@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
        if (!priv->external_encoder)
                return -ENOMEM;
 
-       ret = drm_encoder_init(ddev, priv->external_encoder,
-                              &tilcdc_external_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(ddev, priv->external_encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret) {
                dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
                return ret;
index 5584e656b8575822f9c76cb51ab2156cef0c904c..12823d60c4e89542f38dd61139ff7ba7523b944b 100644 (file)
@@ -16,6 +16,7 @@
 #include <drm/drm_connector.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_panel.h"
@@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
        /* nothing needed */
 }
 
-static const struct drm_encoder_funcs panel_encoder_funcs = {
-               .destroy        = drm_encoder_cleanup,
-};
-
 static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
                .dpms           = panel_encoder_dpms,
                .prepare        = panel_encoder_prepare,
@@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
        encoder = &panel_encoder->base;
        encoder->possible_crtcs = 1;
 
-       ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
-                       DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                goto fail;
 
index 4160e74e4751d8908a6e84ef0a9e3f56fc364eec..2b6414f0fa75957e0ce70065625993cc81cd2ff6 100644 (file)
@@ -1,5 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+config DRM_CIRRUS_QEMU
+       tristate "Cirrus driver for QEMU emulated device"
+       depends on DRM && PCI && MMU
+       select DRM_KMS_HELPER
+       select DRM_GEM_SHMEM_HELPER
+       help
+        This is a KMS driver for emulated cirrus device in qemu.
+        It is *NOT* intended for real cirrus devices. This requires
+        the modesetting userspace X.org driver.
+
+        Cirrus is obsolete, the hardware was designed in the 90ies
+        and can't keep up with todays needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
+
 config DRM_GM12U320
        tristate "GM12U320 driver for USB projectors"
        depends on DRM && USB
index c96ceee71453b3b56a4850922023c2cef15d8eae..6ae4e9e5a35fbd883dda1777fd4941f27bef3dd8 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+obj-$(CONFIG_DRM_CIRRUS_QEMU)          += cirrus.o
 obj-$(CONFIG_DRM_GM12U320)             += gm12u320.o
 obj-$(CONFIG_TINYDRM_HX8357D)          += hx8357d.o
 obj-$(CONFIG_TINYDRM_ILI9225)          += ili9225.o
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
new file mode 100644 (file)
index 0000000..744a8e3
--- /dev/null
@@ -0,0 +1,660 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2012-2019 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *         Dave Airlie
+ *         Gerd Hoffmann
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <video/cirrus.h>
+#include <video/vga.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu cirrus vga"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 0
+
+#define CIRRUS_MAX_PITCH (0x1FF << 3)      /* (4096 - 1) & ~111b bytes */
+#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
+
+struct cirrus_device {
+       struct drm_device              dev;
+       struct drm_simple_display_pipe pipe;
+       struct drm_connector           conn;
+       unsigned int                   cpp;
+       unsigned int                   pitch;
+       void __iomem                   *vram;
+       void __iomem                   *mmio;
+};
+
+#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+
+/* ------------------------------------------------------------------ */
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
+{
+       iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+       return ioread8(cirrus->mmio + SEQ_DATA);
+}
+
+static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+       iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+       iowrite8(val, cirrus->mmio + SEQ_DATA);
+}
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
+{
+       iowrite8(reg, cirrus->mmio + CRT_INDEX);
+       return ioread8(cirrus->mmio + CRT_DATA);
+}
+
+static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+       iowrite8(reg, cirrus->mmio + CRT_INDEX);
+       iowrite8(val, cirrus->mmio + CRT_DATA);
+}
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+       iowrite8(reg, cirrus->mmio + GFX_INDEX);
+       iowrite8(val, cirrus->mmio + GFX_DATA);
+}
+
+#define VGA_DAC_MASK  0x06
+
+static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
+{
+       ioread8(cirrus->mmio + VGA_DAC_MASK);
+       ioread8(cirrus->mmio + VGA_DAC_MASK);
+       ioread8(cirrus->mmio + VGA_DAC_MASK);
+       ioread8(cirrus->mmio + VGA_DAC_MASK);
+       iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
+}
+
+static int cirrus_convert_to(struct drm_framebuffer *fb)
+{
+       if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
+               if (fb->width * 3 <= CIRRUS_MAX_PITCH)
+                       /* convert from XR24 to RG24 */
+                       return 3;
+               else
+                       /* convert from XR24 to RG16 */
+                       return 2;
+       }
+       return 0;
+}
+
+static int cirrus_cpp(struct drm_framebuffer *fb)
+{
+       int convert_cpp = cirrus_convert_to(fb);
+
+       if (convert_cpp)
+               return convert_cpp;
+       return fb->format->cpp[0];
+}
+
+static int cirrus_pitch(struct drm_framebuffer *fb)
+{
+       int convert_cpp = cirrus_convert_to(fb);
+
+       if (convert_cpp)
+               return convert_cpp * fb->width;
+       return fb->pitches[0];
+}
+
+static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
+{
+       int idx;
+       u32 addr;
+       u8 tmp;
+
+       if (!drm_dev_enter(&cirrus->dev, &idx))
+               return;
+
+       addr = offset >> 2;
+       wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
+       wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
+
+       tmp = rreg_crt(cirrus, 0x1b);
+       tmp &= 0xf2;
+       tmp |= (addr >> 16) & 0x01;
+       tmp |= (addr >> 15) & 0x0c;
+       wreg_crt(cirrus, 0x1b, tmp);
+
+       tmp = rreg_crt(cirrus, 0x1d);
+       tmp &= 0x7f;
+       tmp |= (addr >> 12) & 0x80;
+       wreg_crt(cirrus, 0x1d, tmp);
+
+       drm_dev_exit(idx);
+}
+
+static int cirrus_mode_set(struct cirrus_device *cirrus,
+                          struct drm_display_mode *mode,
+                          struct drm_framebuffer *fb)
+{
+       int hsyncstart, hsyncend, htotal, hdispend;
+       int vtotal, vdispend;
+       int tmp, idx;
+       int sr07 = 0, hdr = 0;
+
+       if (!drm_dev_enter(&cirrus->dev, &idx))
+               return -1;
+
+       htotal = mode->htotal / 8;
+       hsyncend = mode->hsync_end / 8;
+       hsyncstart = mode->hsync_start / 8;
+       hdispend = mode->hdisplay / 8;
+
+       vtotal = mode->vtotal;
+       vdispend = mode->vdisplay;
+
+       vdispend -= 1;
+       vtotal -= 2;
+
+       htotal -= 5;
+       hdispend -= 1;
+       hsyncstart += 1;
+       hsyncend += 1;
+
+       wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
+       wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
+       wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
+       wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
+       wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
+       wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
+       wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+       tmp = 0x40;
+       if ((vdispend + 1) & 512)
+               tmp |= 0x20;
+       wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
+
+       /*
+        * Overflow bits for values that don't fit in the standard registers
+        */
+       tmp = 0x10;
+       if (vtotal & 0x100)
+               tmp |= 0x01;
+       if (vdispend & 0x100)
+               tmp |= 0x02;
+       if ((vdispend + 1) & 0x100)
+               tmp |= 0x08;
+       if (vtotal & 0x200)
+               tmp |= 0x20;
+       if (vdispend & 0x200)
+               tmp |= 0x40;
+       wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
+
+       tmp = 0;
+
+       /* More overflow bits */
+
+       if ((htotal + 5) & 0x40)
+               tmp |= 0x10;
+       if ((htotal + 5) & 0x80)
+               tmp |= 0x20;
+       if (vtotal & 0x100)
+               tmp |= 0x40;
+       if (vtotal & 0x200)
+               tmp |= 0x80;
+
+       wreg_crt(cirrus, CL_CRT1A, tmp);
+
+       /* Disable Hercules/CGA compatibility */
+       wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
+
+       sr07 = rreg_seq(cirrus, 0x07);
+       sr07 &= 0xe0;
+       hdr = 0;
+
+       cirrus->cpp = cirrus_cpp(fb);
+       switch (cirrus->cpp * 8) {
+       case 8:
+               sr07 |= 0x11;
+               break;
+       case 16:
+               sr07 |= 0x17;
+               hdr = 0xc1;
+               break;
+       case 24:
+               sr07 |= 0x15;
+               hdr = 0xc5;
+               break;
+       case 32:
+               sr07 |= 0x19;
+               hdr = 0xc5;
+               break;
+       default:
+               drm_dev_exit(idx);
+               return -1;
+       }
+
+       wreg_seq(cirrus, 0x7, sr07);
+
+       /* Program the pitch */
+       cirrus->pitch = cirrus_pitch(fb);
+       tmp = cirrus->pitch / 8;
+       wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
+
+       /* Enable extended blanking and pitch bits, and enable full memory */
+       tmp = 0x22;
+       tmp |= (cirrus->pitch >> 7) & 0x10;
+       tmp |= (cirrus->pitch >> 6) & 0x40;
+       wreg_crt(cirrus, 0x1b, tmp);
+
+       /* Enable high-colour modes */
+       wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
+
+       /* And set graphics mode */
+       wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
+
+       wreg_hdr(cirrus, hdr);
+
+       cirrus_set_start_address(cirrus, 0);
+
+       /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+       outb(0x20, 0x3c0);
+
+       drm_dev_exit(idx);
+       return 0;
+}
+
+static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+                              struct drm_rect *rect)
+{
+       struct cirrus_device *cirrus = to_cirrus(fb->dev);
+       void *vmap;
+       int idx, ret;
+
+       ret = -ENODEV;
+       if (!drm_dev_enter(&cirrus->dev, &idx))
+               goto out;
+
+       ret = -ENOMEM;
+       vmap = drm_gem_shmem_vmap(fb->obj[0]);
+       if (!vmap)
+               goto out_dev_exit;
+
+       if (cirrus->cpp == fb->format->cpp[0])
+               drm_fb_memcpy_dstclip(cirrus->vram,
+                                     vmap, fb, rect);
+
+       else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
+               drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
+                                                 cirrus->pitch,
+                                                 vmap, fb, rect, false);
+
+       else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
+               drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
+                                                 cirrus->pitch,
+                                                 vmap, fb, rect);
+
+       else
+               WARN_ON_ONCE("cpp mismatch");
+
+       drm_gem_shmem_vunmap(fb->obj[0], vmap);
+       ret = 0;
+
+out_dev_exit:
+       drm_dev_exit(idx);
+out:
+       return ret;
+}
+
+static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
+{
+       struct drm_rect fullscreen = {
+               .x1 = 0,
+               .x2 = fb->width,
+               .y1 = 0,
+               .y2 = fb->height,
+       };
+       return cirrus_fb_blit_rect(fb, &fullscreen);
+}
+
+static int cirrus_check_size(int width, int height,
+                            struct drm_framebuffer *fb)
+{
+       int pitch = width * 2;
+
+       if (fb)
+               pitch = cirrus_pitch(fb);
+
+       if (pitch > CIRRUS_MAX_PITCH)
+               return -EINVAL;
+       if (pitch * height > CIRRUS_VRAM_SIZE)
+               return -EINVAL;
+       return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus connector                                                  */
+
+static int cirrus_conn_get_modes(struct drm_connector *conn)
+{
+       int count;
+
+       count = drm_add_modes_noedid(conn,
+                                    conn->dev->mode_config.max_width,
+                                    conn->dev->mode_config.max_height);
+       drm_set_preferred_mode(conn, 1024, 768);
+       return count;
+}
+
+static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
+       .get_modes = cirrus_conn_get_modes,
+};
+
+static const struct drm_connector_funcs cirrus_conn_funcs = {
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cirrus_conn_init(struct cirrus_device *cirrus)
+{
+       drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
+       return drm_connector_init(&cirrus->dev, &cirrus->conn,
+                                 &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus (simple) display pipe                                              */
+
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+                                                  const struct drm_display_mode *mode)
+{
+       if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
+               return MODE_BAD;
+       return MODE_OK;
+}
+
+static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
+                            struct drm_plane_state *plane_state,
+                            struct drm_crtc_state *crtc_state)
+{
+       struct drm_framebuffer *fb = plane_state->fb;
+
+       if (!fb)
+               return 0;
+       return cirrus_check_size(fb->width, fb->height, fb);
+}
+
+static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
+                              struct drm_crtc_state *crtc_state,
+                              struct drm_plane_state *plane_state)
+{
+       struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
+
+       cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
+       cirrus_fb_blit_fullscreen(plane_state->fb);
+}
+
+static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+                              struct drm_plane_state *old_state)
+{
+       struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
+       struct drm_plane_state *state = pipe->plane.state;
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_rect rect;
+
+       if (pipe->plane.state->fb &&
+           cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
+               cirrus_mode_set(cirrus, &crtc->mode,
+                               pipe->plane.state->fb);
+
+       if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+               cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
+}
+
+static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
+       .mode_valid = cirrus_pipe_mode_valid,
+       .check      = cirrus_pipe_check,
+       .enable     = cirrus_pipe_enable,
+       .update     = cirrus_pipe_update,
+};
+
+static const uint32_t cirrus_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t cirrus_modifiers[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static int cirrus_pipe_init(struct cirrus_device *cirrus)
+{
+       return drm_simple_display_pipe_init(&cirrus->dev,
+                                           &cirrus->pipe,
+                                           &cirrus_pipe_funcs,
+                                           cirrus_formats,
+                                           ARRAY_SIZE(cirrus_formats),
+                                           cirrus_modifiers,
+                                           &cirrus->conn);
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus framebuffers & mode config                                 */
+
+static struct drm_framebuffer*
+cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+                const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
+           mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
+           mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+               return ERR_PTR(-EINVAL);
+       if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
+               return ERR_PTR(-EINVAL);
+       return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
+       .fb_create = cirrus_fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int cirrus_mode_config_init(struct cirrus_device *cirrus)
+{
+       struct drm_device *dev = &cirrus->dev;
+       int ret;
+
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
+       dev->mode_config.max_height = 1024;
+       dev->mode_config.preferred_depth = 16;
+       dev->mode_config.prefer_shadow = 0;
+       dev->mode_config.funcs = &cirrus_mode_config_funcs;
+
+       return 0;
+}
+
+/* ------------------------------------------------------------------ */
+
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
+
+static struct drm_driver cirrus_driver = {
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+       .name            = DRIVER_NAME,
+       .desc            = DRIVER_DESC,
+       .date            = DRIVER_DATE,
+       .major           = DRIVER_MAJOR,
+       .minor           = DRIVER_MINOR,
+
+       .fops            = &cirrus_fops,
+       DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *ent)
+{
+       struct drm_device *dev;
+       struct cirrus_device *cirrus;
+       int ret;
+
+       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
+       if (ret)
+               return ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       ret = pci_request_regions(pdev, DRIVER_NAME);
+       if (ret)
+               return ret;
+
+       ret = -ENOMEM;
+       cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
+                                   struct cirrus_device, dev);
+       if (IS_ERR(cirrus))
+               return PTR_ERR(cirrus);
+
+       dev = &cirrus->dev;
+
+       cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+                                   pci_resource_len(pdev, 0));
+       if (cirrus->vram == NULL)
+               return -ENOMEM;
+
+       cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
+                                   pci_resource_len(pdev, 1));
+       if (cirrus->mmio == NULL)
+               return -ENOMEM;
+
+       ret = cirrus_mode_config_init(cirrus);
+       if (ret)
+               return ret;
+
+       ret = cirrus_conn_init(cirrus);
+       if (ret < 0)
+               return ret;
+
+       ret = cirrus_pipe_init(cirrus);
+       if (ret < 0)
+               return ret;
+
+       drm_mode_config_reset(dev);
+
+       dev->pdev = pdev;
+       pci_set_drvdata(pdev, dev);
+       ret = drm_dev_register(dev, 0);
+       if (ret)
+               return ret;
+
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+       return 0;
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       drm_dev_unplug(dev);
+       drm_atomic_helper_shutdown(dev);
+}
+
+static const struct pci_device_id pciidlist[] = {
+       {
+               .vendor    = PCI_VENDOR_ID_CIRRUS,
+               .device    = PCI_DEVICE_ID_CIRRUS_5446,
+               /* only bind to the cirrus chip in qemu */
+               .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+               .subdevice = PCI_SUBDEVICE_ID_QEMU,
+       }, {
+               .vendor    = PCI_VENDOR_ID_CIRRUS,
+               .device    = PCI_DEVICE_ID_CIRRUS_5446,
+               .subvendor = PCI_VENDOR_ID_XEN,
+               .subdevice = 0x0001,
+       },
+       { /* end if list */ }
+};
+
+static struct pci_driver cirrus_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = cirrus_pci_probe,
+       .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+       if (vgacon_text_force())
+               return -EINVAL;
+       return pci_register_driver(&cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+       pci_unregister_driver(&cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_LICENSE("GPL");
index a48173441ae0db23de8f7d62f03c80ebe4cc4512..cc397671f6898851a73a04ebf9e802b84847fcb9 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -87,18 +88,18 @@ struct gm12u320_device {
        struct usb_device               *udev;
        unsigned char                   *cmd_buf;
        unsigned char                   *data_buf[GM12U320_BLOCK_COUNT];
-       bool                             pipe_enabled;
        struct {
-               bool                     run;
-               struct workqueue_struct *workq;
-               struct work_struct       work;
-               wait_queue_head_t        waitq;
+               struct delayed_work       work;
                struct mutex             lock;
                struct drm_framebuffer  *fb;
                struct drm_rect          rect;
+               int frame;
+               int draw_status_timeout;
        } fb_update;
 };
 
+#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
+
 static const char cmd_data[CMD_SIZE] = {
        0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
        0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
@@ -159,7 +160,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
        int i, block_size;
        const char *hdr;
 
-       gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+       gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
        if (!gm12u320->cmd_buf)
                return -ENOMEM;
 
@@ -172,7 +173,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
                        hdr = data_block_header;
                }
 
-               gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+               gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
+                                                    block_size, GFP_KERNEL);
                if (!gm12u320->data_buf[i])
                        return -ENOMEM;
 
@@ -182,26 +184,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
                       data_block_footer, DATA_BLOCK_FOOTER_SIZE);
        }
 
-       gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
-       if (!gm12u320->fb_update.workq)
-               return -ENOMEM;
-
        return 0;
 }
 
-static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
-{
-       int i;
-
-       if (gm12u320->fb_update.workq)
-               destroy_workqueue(gm12u320->fb_update.workq);
-
-       for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
-               kfree(gm12u320->data_buf[i]);
-
-       kfree(gm12u320->cmd_buf);
-}
-
 static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
                                 u8 req_a, u8 req_b,
                                 u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
@@ -344,80 +329,77 @@ unlock:
 static void gm12u320_fb_update_work(struct work_struct *work)
 {
        struct gm12u320_device *gm12u320 =
-               container_of(work, struct gm12u320_device, fb_update.work);
-       int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+               container_of(to_delayed_work(work), struct gm12u320_device,
+                            fb_update.work);
        int block, block_size, len;
-       int frame = 0;
        int ret = 0;
 
-       while (gm12u320->fb_update.run) {
-               gm12u320_copy_fb_to_blocks(gm12u320);
-
-               for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
-                       if (block == GM12U320_BLOCK_COUNT - 1)
-                               block_size = DATA_LAST_BLOCK_SIZE;
-                       else
-                               block_size = DATA_BLOCK_SIZE;
-
-                       /* Send data command to device */
-                       memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
-                       gm12u320->cmd_buf[8] = block_size & 0xff;
-                       gm12u320->cmd_buf[9] = block_size >> 8;
-                       gm12u320->cmd_buf[20] = 0xfc - block * 4;
-                       gm12u320->cmd_buf[21] = block | (frame << 7);
-
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                               gm12u320->cmd_buf, CMD_SIZE, &len,
-                               CMD_TIMEOUT);
-                       if (ret || len != CMD_SIZE)
-                               goto err;
-
-                       /* Send data block to device */
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                               gm12u320->data_buf[block], block_size,
-                               &len, DATA_TIMEOUT);
-                       if (ret || len != block_size)
-                               goto err;
-
-                       /* Read status */
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
-                               gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
-                               CMD_TIMEOUT);
-                       if (ret || len != READ_STATUS_SIZE)
-                               goto err;
-               }
+       gm12u320_copy_fb_to_blocks(gm12u320);
+
+       for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+               if (block == GM12U320_BLOCK_COUNT - 1)
+                       block_size = DATA_LAST_BLOCK_SIZE;
+               else
+                       block_size = DATA_BLOCK_SIZE;
+
+               /* Send data command to device */
+               memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+               gm12u320->cmd_buf[8] = block_size & 0xff;
+               gm12u320->cmd_buf[9] = block_size >> 8;
+               gm12u320->cmd_buf[20] = 0xfc - block * 4;
+               gm12u320->cmd_buf[21] =
+                       block | (gm12u320->fb_update.frame << 7);
 
-               /* Send draw command to device */
-               memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
                ret = usb_bulk_msg(gm12u320->udev,
                        usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                       gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+                       gm12u320->cmd_buf, CMD_SIZE, &len,
+                       CMD_TIMEOUT);
                if (ret || len != CMD_SIZE)
                        goto err;
 
+               /* Send data block to device */
+               ret = usb_bulk_msg(gm12u320->udev,
+                       usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+                       gm12u320->data_buf[block], block_size,
+                       &len, DATA_TIMEOUT);
+               if (ret || len != block_size)
+                       goto err;
+
                /* Read status */
                ret = usb_bulk_msg(gm12u320->udev,
                        usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
                        gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
-                       draw_status_timeout);
+                       CMD_TIMEOUT);
                if (ret || len != READ_STATUS_SIZE)
                        goto err;
-
-               draw_status_timeout = CMD_TIMEOUT;
-               frame = !frame;
-
-               /*
-                * We must draw a frame every 2s otherwise the projector
-                * switches back to showing its logo.
-                */
-               wait_event_timeout(gm12u320->fb_update.waitq,
-                                  !gm12u320->fb_update.run ||
-                                       gm12u320->fb_update.fb != NULL,
-                                  IDLE_TIMEOUT);
        }
+
+       /* Send draw command to device */
+       memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+       ret = usb_bulk_msg(gm12u320->udev,
+               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+               gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+       if (ret || len != CMD_SIZE)
+               goto err;
+
+       /* Read status */
+       ret = usb_bulk_msg(gm12u320->udev,
+               usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+               gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+               gm12u320->fb_update.draw_status_timeout);
+       if (ret || len != READ_STATUS_SIZE)
+               goto err;
+
+       gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
+       gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
+
+       /*
+        * We must draw a frame every 2s otherwise the projector
+        * switches back to showing its logo.
+        */
+       queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+                          IDLE_TIMEOUT);
+
        return;
 err:
        /* Do not log errors caused by module unload or device unplug */
@@ -428,7 +410,7 @@ err:
 static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
                                   struct drm_rect *dirty)
 {
-       struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
        struct drm_framebuffer *old_fb = NULL;
        bool wakeup = false;
 
@@ -452,36 +434,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
        mutex_unlock(&gm12u320->fb_update.lock);
 
        if (wakeup)
-               wake_up(&gm12u320->fb_update.waitq);
+               mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
 
        if (old_fb)
                drm_framebuffer_put(old_fb);
 }
 
-static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
-{
-       mutex_lock(&gm12u320->fb_update.lock);
-       gm12u320->fb_update.run = true;
-       mutex_unlock(&gm12u320->fb_update.lock);
-
-       queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
-}
-
 static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
 {
-       mutex_lock(&gm12u320->fb_update.lock);
-       gm12u320->fb_update.run = false;
-       mutex_unlock(&gm12u320->fb_update.lock);
+       struct drm_framebuffer *old_fb;
 
-       wake_up(&gm12u320->fb_update.waitq);
-       cancel_work_sync(&gm12u320->fb_update.work);
+       cancel_delayed_work_sync(&gm12u320->fb_update.work);
 
        mutex_lock(&gm12u320->fb_update.lock);
-       if (gm12u320->fb_update.fb) {
-               drm_framebuffer_put(gm12u320->fb_update.fb);
-               gm12u320->fb_update.fb = NULL;
-       }
+       old_fb = gm12u320->fb_update.fb;
+       gm12u320->fb_update.fb = NULL;
        mutex_unlock(&gm12u320->fb_update.lock);
+
+       drm_framebuffer_put(old_fb);
 }
 
 static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
@@ -589,20 +559,18 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
                                 struct drm_crtc_state *crtc_state,
                                 struct drm_plane_state *plane_state)
 {
-       struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
        struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+       struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
 
+       gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
        gm12u320_fb_mark_dirty(plane_state->fb, &rect);
-       gm12u320_start_fb_update(gm12u320);
-       gm12u320->pipe_enabled = true;
 }
 
 static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-       struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
 
        gm12u320_stop_fb_update(gm12u320);
-       gm12u320->pipe_enabled = false;
 }
 
 static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -630,16 +598,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static void gm12u320_driver_release(struct drm_device *dev)
-{
-       struct gm12u320_device *gm12u320 = dev->dev_private;
-
-       gm12u320_usb_free(gm12u320);
-       drm_mode_config_cleanup(dev);
-       drm_dev_fini(dev);
-       kfree(gm12u320);
-}
-
 DEFINE_DRM_GEM_FOPS(gm12u320_fops);
 
 static struct drm_driver gm12u320_drm_driver = {
@@ -651,7 +609,6 @@ static struct drm_driver gm12u320_drm_driver = {
        .major           = DRIVER_MAJOR,
        .minor           = DRIVER_MINOR,
 
-       .release         = gm12u320_driver_release,
        .fops            = &gm12u320_fops,
        DRM_GEM_SHMEM_DRIVER_OPS,
 };
@@ -676,24 +633,21 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
        if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
-       if (gm12u320 == NULL)
-               return -ENOMEM;
+       gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
+                                     struct gm12u320_device, dev);
+       if (IS_ERR(gm12u320))
+               return PTR_ERR(gm12u320);
 
        gm12u320->udev = interface_to_usbdev(interface);
-       INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+       INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
        mutex_init(&gm12u320->fb_update.lock);
-       init_waitqueue_head(&gm12u320->fb_update.waitq);
 
        dev = &gm12u320->dev;
-       ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
-       if (ret) {
-               kfree(gm12u320);
+
+       ret = drmm_mode_config_init(dev);
+       if (ret)
                return ret;
-       }
-       dev->dev_private = gm12u320;
 
-       drm_mode_config_init(dev);
        dev->mode_config.min_width = GM12U320_USER_WIDTH;
        dev->mode_config.max_width = GM12U320_USER_WIDTH;
        dev->mode_config.min_height = GM12U320_HEIGHT;
@@ -702,15 +656,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
 
        ret = gm12u320_usb_alloc(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = gm12u320_set_ecomode(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = gm12u320_conn_init(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = drm_simple_display_pipe_init(&gm12u320->dev,
                                           &gm12u320->pipe,
@@ -720,56 +674,44 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
                                           gm12u320_pipe_modifiers,
                                           &gm12u320->conn);
        if (ret)
-               goto err_put;
+               return ret;
 
        drm_mode_config_reset(dev);
 
        usb_set_intfdata(interface, dev);
        ret = drm_dev_register(dev, 0);
        if (ret)
-               goto err_put;
+               return ret;
 
        drm_fbdev_generic_setup(dev, 0);
 
        return 0;
-
-err_put:
-       drm_dev_put(dev);
-       return ret;
 }
 
 static void gm12u320_usb_disconnect(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
 
-       gm12u320_stop_fb_update(gm12u320);
        drm_dev_unplug(dev);
-       drm_dev_put(dev);
+       drm_atomic_helper_shutdown(dev);
 }
 
 static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
                                           pm_message_t message)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
 
-       if (gm12u320->pipe_enabled)
-               gm12u320_stop_fb_update(gm12u320);
-
-       return 0;
+       return drm_mode_config_helper_suspend(dev);
 }
 
 static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(dev);
 
        gm12u320_set_ecomode(gm12u320);
-       if (gm12u320->pipe_enabled)
-               gm12u320_start_fb_update(gm12u320);
 
-       return 0;
+       return drm_mode_config_helper_resume(dev);
 }
 
 static const struct usb_device_id id_table[] = {
index 9af8ff84974f5ca99a8095c5965640bef46249fb..b4bc358a3269a7363d3471ed86757fa872b8b7ea 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -195,7 +196,6 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
 static struct drm_driver hx8357d_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &hx8357d_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "hx8357d",
@@ -226,18 +226,12 @@ static int hx8357d_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
        if (IS_ERR(dc)) {
index 802fb8dde1b61b98a047de9f2a508aac1e0b23fc..d1a5ab6747d5cd8012d3c1876ffbf3f57aea82cf 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
 
@@ -345,7 +346,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
 static struct drm_driver ili9225_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9225_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .name                   = "ili9225",
        .desc                   = "Ilitek ILI9225",
@@ -376,19 +376,13 @@ static int ili9225_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 33b51dc7faa87359b3b8886ebfa7aa33908a375b..bb819f45a5d3b5d8969aa6df36db567ef8d36be6 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -151,7 +152,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
 static struct drm_driver ili9341_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9341_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "ili9341",
@@ -183,19 +183,13 @@ static int ili9341_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 532560aebb1e0fa2f08ec33e5f500588f054facf..2702ea557d297568cb0521c6ad34329e38d20e7d 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 
@@ -164,7 +165,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
 static struct drm_driver ili9486_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9486_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "ili9486",
@@ -197,19 +197,13 @@ static int ili9486_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index e2cfd9a171436cd6fe752f35ad9744fa3fec9c73..08ac549ab0f7fc3bc4f5b27f5d0b1277b3aadb8d 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -155,7 +156,6 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
 static struct drm_driver mi0283qt_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &mi0283qt_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "mi0283qt",
@@ -187,19 +187,13 @@ static int mi0283qt_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index f5ebcaf7ee3a0631e1e6a882bb9e2fb3211d65b0..1c0e7169545b4ae4e8ea7c95e60c15217edb5214 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_format_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_probe_helper.h>
@@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
-static void repaper_release(struct drm_device *drm)
-{
-       struct repaper_epd *epd = drm_to_epd(drm);
-
-       DRM_DEBUG_DRIVER("\n");
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(epd);
-}
-
 static const uint32_t repaper_formats[] = {
        DRM_FORMAT_XRGB8888,
 };
@@ -956,7 +946,6 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
 static struct drm_driver repaper_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &repaper_fops,
-       .release                = repaper_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .name                   = "repaper",
        .desc                   = "Pervasive Displays RePaper e-ink panels",
@@ -1013,19 +1002,16 @@ static int repaper_probe(struct spi_device *spi)
                }
        }
 
-       epd = kzalloc(sizeof(*epd), GFP_KERNEL);
-       if (!epd)
-               return -ENOMEM;
+       epd = devm_drm_dev_alloc(dev, &repaper_driver,
+                                struct repaper_epd, drm);
+       if (IS_ERR(epd))
+               return PTR_ERR(epd);
 
        drm = &epd->drm;
 
-       ret = devm_drm_dev_init(dev, drm, &repaper_driver);
-       if (ret) {
-               kfree(epd);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
                return ret;
-       }
-
-       drm_mode_config_init(drm);
        drm->mode_config.funcs = &repaper_mode_config_funcs;
 
        epd->spi = spi;
index 9ef559dd3191cfd459ab7f497cc983f1d0548976..2a1fae422f7a210e68fc06da4722d31886d6a115 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_format_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
 
@@ -284,7 +285,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
 static struct drm_driver st7586_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &st7586_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7586",
@@ -317,19 +317,13 @@ static int st7586_probe(struct spi_device *spi)
        size_t bufsize;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &st7586_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
 
index 3cd9b8d9888d639607da4701ae9fbad4f17a34d1..0af1b15efdf8a284f321057b6e22fa5d6fb0fbfb 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 
 #define ST7735R_FRMCTR1                0xb1
@@ -156,7 +157,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
 static struct drm_driver st7735r_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &st7735r_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7735r",
@@ -195,22 +195,16 @@ static int st7735r_probe(struct spi_device *spi)
        if (!cfg)
                cfg = (void *)spi_get_device_id(spi)->driver_data;
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       priv = devm_drm_dev_alloc(dev, &st7735r_driver,
+                                 struct st7735r_priv, dbidev.drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        dbidev = &priv->dbidev;
        priv->cfg = cfg;
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 9e07c3f75156ba1d7a48bccea82528eca1c7359b..f73b81c2576e1c05f65a9020a0934a5c8ddd35f5 100644 (file)
@@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref)
                ttm_mem_io_unlock(man);
        }
 
-       if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+           !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;
@@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref)
        spin_unlock(&ttm_bo_glob.lru_lock);
 
        ttm_bo_cleanup_memtype_use(bo);
+       dma_resv_unlock(bo->base.resv);
 
        BUG_ON(bo->mem.mm_node != NULL);
        atomic_dec(&ttm_bo_glob.bo_count);
index 0afdfb0d1fe1197d78115cff72830c1848842a23..cdc1c42e16695215f28b6f4acec56a0a85e4eea0 100644 (file)
@@ -59,7 +59,7 @@ static int udl_get_modes(struct drm_connector *connector)
 static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
                          struct drm_display_mode *mode)
 {
-       struct udl_device *udl = connector->dev->dev_private;
+       struct udl_device *udl = to_udl(connector->dev);
        if (!udl->sku_pixel_limit)
                return 0;
 
@@ -72,7 +72,7 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       struct udl_device *udl = connector->dev->dev_private;
+       struct udl_device *udl = to_udl(connector->dev);
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
                                        struct udl_drm_connector,
index e6c1cd77d4d4a0d3f803364209c19da309e65b97..d1aa50fd6d65ab2fedcd7765b9f2928f4ec0f600 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
@@ -33,17 +34,8 @@ static int udl_usb_resume(struct usb_interface *interface)
 
 DEFINE_DRM_GEM_FOPS(udl_driver_fops);
 
-static void udl_driver_release(struct drm_device *dev)
-{
-       udl_fini(dev);
-       udl_modeset_cleanup(dev);
-       drm_dev_fini(dev);
-       kfree(dev);
-}
-
 static struct drm_driver driver = {
        .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
-       .release = udl_driver_release,
 
        /* gem hooks */
        .gem_create_object = udl_driver_gem_create_object,
@@ -65,27 +57,19 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface)
        struct udl_device *udl;
        int r;
 
-       udl = kzalloc(sizeof(*udl), GFP_KERNEL);
-       if (!udl)
-               return ERR_PTR(-ENOMEM);
-
-       r = drm_dev_init(&udl->drm, &driver, &interface->dev);
-       if (r) {
-               kfree(udl);
-               return ERR_PTR(r);
-       }
+       udl = devm_drm_dev_alloc(&interface->dev, &driver,
+                                struct udl_device, drm);
+       if (IS_ERR(udl))
+               return udl;
 
        udl->udev = udev;
-       udl->drm.dev_private = udl;
 
        r = udl_init(udl);
-       if (r) {
-               drm_dev_fini(&udl->drm);
-               kfree(udl);
+       if (r)
                return ERR_PTR(r);
-       }
 
        usb_set_intfdata(interface, udl);
+
        return udl;
 }
 
@@ -101,31 +85,22 @@ static int udl_usb_probe(struct usb_interface *interface,
 
        r = drm_dev_register(&udl->drm, 0);
        if (r)
-               goto err_free;
+               return r;
 
        DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
 
-       r = drm_fbdev_generic_setup(&udl->drm, 0);
-       if (r)
-               goto err_drm_dev_unregister;
+       drm_fbdev_generic_setup(&udl->drm, 0);
 
        return 0;
-
-err_drm_dev_unregister:
-       drm_dev_unregister(&udl->drm);
-err_free:
-       drm_dev_put(&udl->drm);
-       return r;
 }
 
 static void udl_usb_disconnect(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
 
-       drm_kms_helper_poll_disable(dev);
+       drm_kms_helper_poll_fini(dev);
        udl_drop_usb(dev);
        drm_dev_unplug(dev);
-       drm_dev_put(dev);
 }
 
 /*
index e67227c44cc444eb0bc3a9cfb1479ae4a4111ab8..2642f94a63fc8e80d82c47750b528898b5d183b4 100644 (file)
@@ -68,7 +68,6 @@ struct udl_device {
 
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_cleanup(struct drm_device *dev);
 struct drm_connector *udl_connector_init(struct drm_device *dev);
 
 struct urb *udl_get_urb(struct drm_device *dev);
@@ -77,7 +76,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
 void udl_urb_completion(struct urb *urb);
 
 int udl_init(struct udl_device *udl);
-void udl_fini(struct drm_device *dev);
 
 int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
                     const char *front, char **urb_buf_ptr,
index 538718919916a7c7247009657a74ce90230f0a7a..f5d27f2a5654341ca1d6deb9f066165a5613de62 100644 (file)
@@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev)
        udl_free_urb_list(dev);
        return 0;
 }
-
-void udl_fini(struct drm_device *dev)
-{
-       struct udl_device *udl = to_udl(dev);
-
-       drm_kms_helper_poll_fini(dev);
-
-       if (udl->urbs.count)
-               udl_free_urb_list(dev);
-}
index d59ebac70b150482c3f386d2f4ed54ac2a925f3b..fef43f4e3bac4f0b74f7922d23770c22e349cb01 100644 (file)
@@ -215,7 +215,7 @@ static char *udl_dummy_render(char *wrptr)
 static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct urb *urb;
        char *buf;
        int retval;
@@ -266,8 +266,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
        return 0;
 }
 
-int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
-                     int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+                            int width, int height)
 {
        struct drm_device *dev = fb->dev;
        struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
@@ -369,7 +369,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_framebuffer *fb = plane_state->fb;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct drm_display_mode *mode = &crtc_state->mode;
        char *buf;
        char *wrptr;
@@ -464,11 +464,13 @@ static const struct drm_mode_config_funcs udl_mode_funcs = {
 int udl_modeset_init(struct drm_device *dev)
 {
        size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct drm_connector *connector;
        int ret;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
 
        dev->mode_config.min_width = 640;
        dev->mode_config.min_height = 480;
@@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev)
        dev->mode_config.funcs = &udl_mode_funcs;
 
        connector = udl_connector_init(dev);
-       if (IS_ERR(connector)) {
-               ret = PTR_ERR(connector);
-               goto err_drm_mode_config_cleanup;
-       }
+       if (IS_ERR(connector))
+               return PTR_ERR(connector);
 
        format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
 
@@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev)
                                           udl_simple_display_pipe_formats,
                                           format_count, NULL, connector);
        if (ret)
-               goto err_drm_mode_config_cleanup;
+               return ret;
 
        drm_mode_config_reset(dev);
 
        return 0;
-
-err_drm_mode_config_cleanup:
-       drm_mode_config_cleanup(dev);
-       return ret;
-}
-
-void udl_modeset_cleanup(struct drm_device *dev)
-{
-       drm_mode_config_cleanup(dev);
 }
index 9e953ce64ef753632a644c0ea8ac87433951ff5b..e76b24bb88285e31c28d7ed4d378891fbf8ef2e8 100644 (file)
@@ -132,7 +132,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
        u32 ident0, ident1, ident2, ident3, cores;
        int ret, core;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -187,8 +187,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
                           (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
        }
 
-       pm_runtime_mark_last_busy(v3d->dev);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_mark_last_busy(v3d->drm.dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
 
        return 0;
 }
@@ -219,7 +219,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
        int measure_ms = 1000;
        int ret;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -245,8 +245,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
                   cycles / (measure_ms * 1000),
                   (cycles / (measure_ms * 100)) % 10);
 
-       pm_runtime_mark_last_busy(v3d->dev);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_mark_last_busy(v3d->drm.dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
 
        return 0;
 }
@@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = {
        {"bo_stats", v3d_debugfs_bo_stats, 0},
 };
 
-int
+void
 v3d_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(v3d_debugfs_list,
-                                       ARRAY_SIZE(v3d_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(v3d_debugfs_list,
+                                ARRAY_SIZE(v3d_debugfs_list),
+                                minor->debugfs_root, minor);
 }
index eaa8e9682373e8d40c3a97dd18dea8b4d9baa41b..82a7dfdd14c2a76c73babbc3ed7ef0afda5b99cc 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
 #include <uapi/drm/v3d_drm.h>
 
 #include "v3d_drv.h"
@@ -104,7 +105,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
                if (args->value != 0)
                        return -EINVAL;
 
-               ret = pm_runtime_get_sync(v3d->dev);
+               ret = pm_runtime_get_sync(v3d->drm.dev);
                if (ret < 0)
                        return ret;
                if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
@@ -113,8 +114,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
                } else {
                        args->value = V3D_READ(offset);
                }
-               pm_runtime_mark_last_busy(v3d->dev);
-               pm_runtime_put_autosuspend(v3d->dev);
+               pm_runtime_mark_last_busy(v3d->drm.dev);
+               pm_runtime_put_autosuspend(v3d->drm.dev);
                return 0;
        }
 
@@ -234,9 +235,9 @@ static int
 map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
 {
        struct resource *res =
-               platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+               platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
 
-       *regs = devm_ioremap_resource(v3d->dev, res);
+       *regs = devm_ioremap_resource(v3d->drm.dev, res);
        return PTR_ERR_OR_ZERO(*regs);
 }
 
@@ -250,20 +251,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
        u32 ident1;
 
 
-       v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
-       if (!v3d)
-               return -ENOMEM;
-       v3d->dev = dev;
-       v3d->pdev = pdev;
+       v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+       if (IS_ERR(v3d))
+               return PTR_ERR(v3d);
+
        drm = &v3d->drm;
 
+       platform_set_drvdata(pdev, drm);
+
        ret = map_regs(v3d, &v3d->hub_regs, "hub");
        if (ret)
-               goto dev_free;
+               return ret;
 
        ret = map_regs(v3d, &v3d->core_regs[0], "core0");
        if (ret)
-               goto dev_free;
+               return ret;
 
        mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
        dev->coherent_dma_mask =
@@ -281,45 +283,37 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
                ret = PTR_ERR(v3d->reset);
 
                if (ret == -EPROBE_DEFER)
-                       goto dev_free;
+                       return ret;
 
                v3d->reset = NULL;
                ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
                if (ret) {
                        dev_err(dev,
                                "Failed to get reset control or bridge regs\n");
-                       goto dev_free;
+                       return ret;
                }
        }
 
        if (v3d->ver < 41) {
                ret = map_regs(v3d, &v3d->gca_regs, "gca");
                if (ret)
-                       goto dev_free;
+                       return ret;
        }
 
        v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
                                        GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
        if (!v3d->mmu_scratch) {
                dev_err(dev, "Failed to allocate MMU scratch page\n");
-               ret = -ENOMEM;
-               goto dev_free;
+               return -ENOMEM;
        }
 
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
        pm_runtime_enable(dev);
 
-       ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
-       if (ret)
-               goto dma_free;
-
-       platform_set_drvdata(pdev, drm);
-       drm->dev_private = v3d;
-
        ret = v3d_gem_init(drm);
        if (ret)
-               goto dev_destroy;
+               goto dma_free;
 
        ret = v3d_irq_init(v3d);
        if (ret)
@@ -335,12 +329,8 @@ irq_disable:
        v3d_irq_disable(v3d);
 gem_destroy:
        v3d_gem_destroy(drm);
-dev_destroy:
-       drm_dev_put(drm);
 dma_free:
        dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
-       kfree(v3d);
        return ret;
 }
 
@@ -353,9 +343,8 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
 
        v3d_gem_destroy(drm);
 
-       drm_dev_put(drm);
-
-       dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+       dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+                   v3d->mmu_scratch_paddr);
 
        return 0;
 }
index ac260333458738379b057618db5f4b11542360f2..8a390738d65baf20189d799d1481d4cd21f636f0 100644 (file)
@@ -14,7 +14,6 @@
 #include "uapi/drm/v3d_drm.h"
 
 struct clk;
-struct device;
 struct platform_device;
 struct reset_control;
 
@@ -47,8 +46,6 @@ struct v3d_dev {
        int ver;
        bool single_irq_line;
 
-       struct device *dev;
-       struct platform_device *pdev;
        void __iomem *hub_regs;
        void __iomem *core_regs[3];
        void __iomem *bridge_regs;
@@ -121,7 +118,7 @@ struct v3d_dev {
 static inline struct v3d_dev *
 to_v3d_dev(struct drm_device *dev)
 {
-       return (struct v3d_dev *)dev->dev_private;
+       return container_of(dev, struct v3d_dev, drm);
 }
 
 static inline bool
@@ -130,6 +127,8 @@ v3d_has_csd(struct v3d_dev *v3d)
        return v3d->ver >= 41;
 }
 
+#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
+
 /* The per-fd struct, which tracks the MMU mappings. */
 struct v3d_file_priv {
        struct v3d_dev *v3d;
@@ -316,7 +315,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
                                                 struct sg_table *sgt);
 
 /* v3d_debugfs.c */
-int v3d_debugfs_init(struct drm_minor *minor);
+void v3d_debugfs_init(struct drm_minor *minor);
 
 /* v3d_fence.c */
 extern const struct dma_fence_ops v3d_fence_ops;
index 549dde83408b0ffed1b0204702978b351e17917a..09a7639cf161f59fcfe34d23ebea036838801315 100644 (file)
@@ -370,8 +370,8 @@ v3d_job_free(struct kref *ref)
        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);
 
-       pm_runtime_mark_last_busy(job->v3d->dev);
-       pm_runtime_put_autosuspend(job->v3d->dev);
+       pm_runtime_mark_last_busy(job->v3d->drm.dev);
+       pm_runtime_put_autosuspend(job->v3d->drm.dev);
 
        kfree(job);
 }
@@ -439,7 +439,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
        job->v3d = v3d;
        job->free = free;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -458,7 +458,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
        return 0;
 fail:
        xa_destroy(&job->deps);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
        return ret;
 }
 
@@ -886,12 +886,12 @@ v3d_gem_init(struct drm_device *dev)
         */
        drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
 
-       v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+       v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
                               &v3d->pt_paddr,
                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
        if (!v3d->pt) {
                drm_mm_takedown(&v3d->mm);
-               dev_err(v3d->dev,
+               dev_err(v3d->drm.dev,
                        "Failed to allocate page tables. "
                        "Please ensure you have CMA enabled.\n");
                return -ENOMEM;
@@ -903,7 +903,7 @@ v3d_gem_init(struct drm_device *dev)
        ret = v3d_sched_init(v3d);
        if (ret) {
                drm_mm_takedown(&v3d->mm);
-               dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+               dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
                                  v3d->pt_paddr);
        }
 
@@ -925,5 +925,6 @@ v3d_gem_destroy(struct drm_device *dev)
 
        drm_mm_takedown(&v3d->mm);
 
-       dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+       dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+                         v3d->pt_paddr);
 }
index 662e67279a7bb40554a02e68588e6e538556c8af..51b65263c6571a2b088230e94c989482bd88cfcb 100644 (file)
@@ -128,7 +128,7 @@ v3d_irq(int irq, void *arg)
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
-               dev_err(v3d->dev, "GMP violation\n");
+               dev_err(v3d->drm.dev, "GMP violation\n");
 
        /* V3D 4.2 wires the hub and core IRQs together, so if we &
         * didn't see the common one then check hub for MMU IRQs.
@@ -189,7 +189,7 @@ v3d_hub_irq(int irq, void *arg)
                                client = v3d41_axi_ids[axi_id];
                }
 
-               dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+               dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
                        client, axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
@@ -217,16 +217,17 @@ v3d_irq_init(struct v3d_dev *v3d)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
 
-       irq1 = platform_get_irq(v3d->pdev, 1);
+       irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
        if (irq1 == -EPROBE_DEFER)
                return irq1;
        if (irq1 > 0) {
-               ret = devm_request_irq(v3d->dev, irq1,
+               ret = devm_request_irq(v3d->drm.dev, irq1,
                                       v3d_irq, IRQF_SHARED,
                                       "v3d_core0", v3d);
                if (ret)
                        goto fail;
-               ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+               ret = devm_request_irq(v3d->drm.dev,
+                                      platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_hub_irq, IRQF_SHARED,
                                       "v3d_hub", v3d);
                if (ret)
@@ -234,7 +235,8 @@ v3d_irq_init(struct v3d_dev *v3d)
        } else {
                v3d->single_irq_line = true;
 
-               ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+               ret = devm_request_irq(v3d->drm.dev,
+                                      platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_irq, IRQF_SHARED,
                                       "v3d", v3d);
                if (ret)
@@ -246,7 +248,7 @@ v3d_irq_init(struct v3d_dev *v3d)
 
 fail:
        if (ret != -EPROBE_DEFER)
-               dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+               dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
        return ret;
 }
 
index 395e81d97163211196dc8d9a5ef7817e8999a9fd..3b81ea28c0bbc8d1a13bde4e025c47be296c176f 100644 (file)
@@ -40,7 +40,7 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret)
-               dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+               dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
 
        V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
                  V3D_MMU_CTL_TLB_CLEAR);
@@ -52,14 +52,14 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret) {
-               dev_err(v3d->dev, "TLB clear wait idle failed\n");
+               dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
                return ret;
        }
 
        ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
                         V3D_MMUC_CONTROL_FLUSHING), 100);
        if (ret)
-               dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+               dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
 
        return ret;
 }
@@ -109,7 +109,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
                     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
 
        if (v3d_mmu_flush_all(v3d))
-               dev_err(v3d->dev, "MMU flush timeout\n");
+               dev_err(v3d->drm.dev, "MMU flush timeout\n");
 }
 
 void v3d_mmu_remove_ptes(struct v3d_bo *bo)
@@ -122,5 +122,5 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo)
                v3d->pt[page] = 0;
 
        if (v3d_mmu_flush_all(v3d))
-               dev_err(v3d->dev, "MMU flush timeout\n");
+               dev_err(v3d->drm.dev, "MMU flush timeout\n");
 }
index 8c2df6d95283e46598d18c66e75c14c80bd3ee5c..0747614a78f0bc4bed7c1f3734c14655a0346415 100644 (file)
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_bin");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+               dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
                return ret;
        }
 
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_render");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+               dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
                        ret);
                v3d_sched_fini(v3d);
                return ret;
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_tfu");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+               dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
                        ret);
                v3d_sched_fini(v3d);
                return ret;
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     msecs_to_jiffies(hang_limit_ms),
                                     "v3d_csd");
                if (ret) {
-                       dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+                       dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
                                ret);
                        v3d_sched_fini(v3d);
                        return ret;
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     msecs_to_jiffies(hang_limit_ms),
                                     "v3d_cache_clean");
                if (ret) {
-                       dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+                       dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
                                ret);
                        v3d_sched_fini(v3d);
                        return ret;
index ac8f75db2ecd8ae5aa892946898815d66bafb423..cf2e3e6a23881a55157ba399ec788438a3652fd4 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 
 #include "vbox_drv.h"
 
@@ -45,28 +46,22 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                return ret;
 
-       vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
-       if (!vbox)
-               return -ENOMEM;
-
-       ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
-       if (ret) {
-               kfree(vbox);
-               return ret;
-       }
+       vbox = devm_drm_dev_alloc(&pdev->dev, &driver,
+                                 struct vbox_private, ddev);
+       if (IS_ERR(vbox))
+               return PTR_ERR(vbox);
 
        vbox->ddev.pdev = pdev;
-       vbox->ddev.dev_private = vbox;
        pci_set_drvdata(pdev, vbox);
        mutex_init(&vbox->hw_mutex);
 
-       ret = pci_enable_device(pdev);
+       ret = pcim_enable_device(pdev);
        if (ret)
-               goto err_dev_put;
+               return ret;
 
        ret = vbox_hw_init(vbox);
        if (ret)
-               goto err_pci_disable;
+               return ret;
 
        ret = vbox_mm_init(vbox);
        if (ret)
@@ -80,14 +75,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_mode_fini;
 
-       ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
-       if (ret)
-               goto err_irq_fini;
-
        ret = drm_dev_register(&vbox->ddev, 0);
        if (ret)
                goto err_irq_fini;
 
+       drm_fbdev_generic_setup(&vbox->ddev, 32);
+
        return 0;
 
 err_irq_fini:
@@ -98,10 +91,6 @@ err_mm_fini:
        vbox_mm_fini(vbox);
 err_hw_fini:
        vbox_hw_fini(vbox);
-err_pci_disable:
-       pci_disable_device(pdev);
-err_dev_put:
-       drm_dev_put(&vbox->ddev);
        return ret;
 }
 
@@ -114,7 +103,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
        vbox_mode_fini(vbox);
        vbox_mm_fini(vbox);
        vbox_hw_fini(vbox);
-       drm_dev_put(&vbox->ddev);
 }
 
 #ifdef CONFIG_PM_SLEEP
index 87421903816c04fc123c3a280163d5a8b2c451ba..ac7c2effc46f4391667548136d259056f0b1c9e4 100644 (file)
@@ -127,6 +127,7 @@ struct vbox_encoder {
 #define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
 #define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
 #define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_dev(x) container_of(x, struct vbox_private, ddev)
 
 bool vbox_check_supported(u16 id);
 int vbox_hw_init(struct vbox_private *vbox);
index 16a1e29f5292ce4d1e745749c3bc56b8d678062f..631657fa554f9a1a5b6104064697f7eec8b6c8fa 100644 (file)
@@ -34,7 +34,7 @@ void vbox_report_hotplug(struct vbox_private *vbox)
 irqreturn_t vbox_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *)arg;
-       struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(dev);
        u32 host_flags = vbox_get_flags(vbox);
 
        if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
index 9dcab115a26110fc541f9e96ef985833f878ab86..d68d9bad76747015c446b3f5908014ca81dd3b70 100644 (file)
@@ -71,8 +71,6 @@ static void vbox_accel_fini(struct vbox_private *vbox)
 
        for (i = 0; i < vbox->num_crtcs; ++i)
                vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
-
-       pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
 }
 
 /* Do we support the 4.3 plus mode hint reporting interface? */
@@ -123,21 +121,22 @@ int vbox_hw_init(struct vbox_private *vbox)
                return -ENOMEM;
 
        /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
-       vbox->guest_pool = gen_pool_create(4, -1);
+       vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
+                                               "vboxvideo-accel");
        if (!vbox->guest_pool)
-               goto err_unmap_guest_heap;
+               return -ENOMEM;
 
        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
-               goto err_destroy_guest_pool;
+               return ret;
 
        ret = hgsmi_test_query_conf(vbox->guest_pool);
        if (ret) {
                DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
-               goto err_destroy_guest_pool;
+               return ret;
        }
 
        /* Reduce available VRAM size to reflect the guest heap. */
@@ -149,33 +148,23 @@ int vbox_hw_init(struct vbox_private *vbox)
 
        if (!have_hgsmi_mode_hints(vbox)) {
                ret = -ENOTSUPP;
-               goto err_destroy_guest_pool;
+               return ret;
        }
 
        vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
                                             sizeof(struct vbva_modehint),
                                             GFP_KERNEL);
-       if (!vbox->last_mode_hints) {
-               ret = -ENOMEM;
-               goto err_destroy_guest_pool;
-       }
+       if (!vbox->last_mode_hints)
+               return -ENOMEM;
 
        ret = vbox_accel_init(vbox);
        if (ret)
-               goto err_destroy_guest_pool;
+               return ret;
 
        return 0;
-
-err_destroy_guest_pool:
-       gen_pool_destroy(vbox->guest_pool);
-err_unmap_guest_heap:
-       pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
-       return ret;
 }
 
 void vbox_hw_fini(struct vbox_private *vbox)
 {
        vbox_accel_fini(vbox);
-       gen_pool_destroy(vbox->guest_pool);
-       pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
 }
index 0883a435e62b9c9e2dee84fe30145f72e73f0086..d9a5af62af890abc2cbb860da6aabb17109af54e 100644 (file)
@@ -36,7 +36,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
        u16 flags;
        s32 x_offset, y_offset;
 
-       vbox = crtc->dev->dev_private;
+       vbox = to_vbox_dev(crtc->dev);
        width = vbox_crtc->width ? vbox_crtc->width : 640;
        height = vbox_crtc->height ? vbox_crtc->height : 480;
        bpp = fb ? fb->format->cpp[0] * 8 : 32;
@@ -77,7 +77,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
 static int vbox_set_view(struct drm_crtc *crtc)
 {
        struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
-       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(crtc->dev);
        struct vbva_infoview *p;
 
        /*
@@ -174,7 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
                                        int x, int y)
 {
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
-       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(crtc->dev);
        struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
        bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
 
@@ -272,7 +272,7 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
 {
        struct drm_crtc *crtc = plane->state->crtc;
        struct drm_framebuffer *fb = plane->state->fb;
-       struct vbox_private *vbox = fb->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(fb->dev);
        struct drm_mode_rect *clips;
        uint32_t num_clips, i;
 
@@ -704,7 +704,7 @@ static int vbox_get_modes(struct drm_connector *connector)
        int preferred_width, preferred_height;
 
        vbox_connector = to_vbox_connector(connector);
-       vbox = connector->dev->dev_private;
+       vbox = to_vbox_dev(connector->dev);
 
        hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
                                    HOST_FLAGS_OFFSET);
index 976423d0c3ccacc588fcf1e6a2a43e7b679b8736..f5a06675da43c67abcf187f7bddc9b95f88e5f3a 100644 (file)
@@ -24,25 +24,13 @@ int vbox_mm_init(struct vbox_private *vbox)
                return ret;
        }
 
-#ifdef DRM_MTRR_WC
-       vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-                                    pci_resource_len(dev->pdev, 0),
-                                    DRM_MTRR_WC);
-#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
-#endif
        return 0;
 }
 
 void vbox_mm_fini(struct vbox_private *vbox)
 {
-#ifdef DRM_MTRR_WC
-       drm_mtrr_del(vbox->fb_mtrr,
-                    pci_resource_start(vbox->ddev.pdev, 0),
-                    pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
-#else
        arch_phys_wc_del(vbox->fb_mtrr);
-#endif
        drm_vram_helper_release_mm(&vbox->ddev);
 }
index b61b2d3407b516c2c819584652a9051ffa5cb53c..4fbbf980a299fd3255464d1c9cd88b17580d6fcc 100644 (file)
@@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry {
  * Called at drm_dev_register() time on each of the minors registered
  * by the DRM device, to attach the debugfs files.
  */
-int
+void
 vc4_debugfs_init(struct drm_minor *minor)
 {
        struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
@@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor)
                            minor->debugfs_root, &vc4->load_tracker_enabled);
 
        list_for_each_entry(entry, &vc4->debugfs_list, link) {
-               int ret = drm_debugfs_create_files(&entry->info, 1,
-                                                  minor->debugfs_root, minor);
-
-               if (ret)
-                       return ret;
+               drm_debugfs_create_files(&entry->info, 1,
+                                        minor->debugfs_root, minor);
        }
-
-       return 0;
 }
 
 static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
index 6dfede03396efb1906a529d97d0bc2174c69b654..a90f2545baee0c7e6b541cc1d957c7dad6c286a4 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
@@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = {
        VC4_REG32(DPI_ID),
 };
 
-static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
 {
        struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
@@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
        if (ret)
                DRM_ERROR("Failed to turn on core clock: %d\n", ret);
 
-       drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
-                        DRM_MODE_ENCODER_DPI, NULL);
+       drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
        drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
 
        ret = vc4_dpi_init_bridge(dpi);
index 139d25a8328e8e60927e687e36821484da64c100..3b1f02efefbe208b99622692137346b8abb900ce 100644 (file)
@@ -759,7 +759,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
                          unsigned int *top, unsigned int *bottom);
 
 /* vc4_debugfs.c */
-int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_init(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
 void vc4_debugfs_add_file(struct drm_device *drm,
                          const char *filename,
index d99b1d52665172abf055042034df49956101c388..eaf276978ee7fb79a145868071ddd91f86fbdd12 100644 (file)
@@ -37,6 +37,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "vc4_drv.h"
 #include "vc4_regs.h"
@@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = {
        VC4_REG32(DSI1_ID),
 };
 
-static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
-       .destroy = vc4_dsi_encoder_destroy,
-};
-
 static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
 {
        u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
@@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
        if (dsi->port == 1)
                vc4->dsi1 = dsi;
 
-       drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
-                        DRM_MODE_ENCODER_DSI, NULL);
+       drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
        drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
 
        ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
@@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
         * normally.
         */
        list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
-       vc4_dsi_encoder_destroy(dsi->encoder);
+       drm_encoder_cleanup(dsi->encoder);
 
        if (dsi->port == 1)
                vc4->dsi1 = NULL;
index 340719238753d2becd1d01be0d909a8644392f46..625bfcf52dc4daabeda6dc0acf0313e30bfff4cd 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/i2c.h>
@@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
        return connector;
 }
 
-static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
-       .destroy = vc4_hdmi_encoder_destroy,
-};
-
 static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type)
 {
@@ -1406,8 +1398,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
        }
        pm_runtime_enable(dev);
 
-       drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
 
        hdmi->connector =
@@ -1465,7 +1456,7 @@ err_destroy_conn:
        vc4_hdmi_connector_destroy(hdmi->connector);
 #endif
 err_destroy_encoder:
-       vc4_hdmi_encoder_destroy(hdmi->encoder);
+       drm_encoder_cleanup(hdmi->encoder);
 err_unprepare_hsm:
        clk_disable_unprepare(hdmi->hsm_clock);
        pm_runtime_disable(dev);
@@ -1484,7 +1475,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
 
        cec_unregister_adapter(hdmi->cec_adap);
        vc4_hdmi_connector_destroy(hdmi->connector);
-       vc4_hdmi_encoder_destroy(hdmi->encoder);
+       drm_encoder_cleanup(hdmi->encoder);
 
        clk_disable_unprepare(hdmi->hsm_clock);
        pm_runtime_disable(dev);
index 7402bc768664ccd47d66e5168769784d52fd20b8..bd5b8eb58b180696b217f914d550af1419a2978d 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
@@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
        return connector;
 }
 
-static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
 {
        struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
@@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
 
        pm_runtime_enable(dev);
 
-       drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
-                        DRM_MODE_ENCODER_TVDAC, NULL);
+       drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
        drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
 
        vec->connector = vc4_vec_connector_init(drm, vec);
index 909eba43664a28f857070ec7090482f53727416d..ec1a8ebb6f1bfcad766f1a0effb2c305f9e11388 100644 (file)
@@ -39,6 +39,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_prime.h>
 
 #include "vgem_drv.h"
@@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev)
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
 
        platform_device_unregister(vgem->platform);
-       drm_dev_fini(&vgem->drm);
-
-       kfree(vgem);
 }
 
 static struct drm_driver vgem_driver = {
@@ -489,16 +487,19 @@ static int __init vgem_init(void)
                           &vgem_device->platform->dev);
        if (ret)
                goto out_unregister;
+       drmm_add_final_kfree(&vgem_device->drm, vgem_device);
 
        /* Final step: expose the device/driver to userspace */
-       ret  = drm_dev_register(&vgem_device->drm, 0);
+       ret = drm_dev_register(&vgem_device->drm, 0);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        return 0;
 
-out_fini:
-       drm_dev_fini(&vgem_device->drm);
+out_put:
+       drm_dev_put(&vgem_device->drm);
+       return ret;
+
 out_unregister:
        platform_device_unregister(vgem_device->platform);
 out_free:
index e27120d512b0614031a98eb3dc9ff03523ead7ee..3221520f61f0cc2ecb3f8030532e64ce2564ff04 100644 (file)
@@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = {
 
 #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
 
-int
+void
 virtio_gpu_debugfs_init(struct drm_minor *minor)
 {
        drm_debugfs_create_files(virtio_gpu_debugfs_list,
                                 VIRTIO_GPU_DEBUGFS_ENTRIES,
                                 minor->debugfs_root, minor);
-       return 0;
 }
index 2b7e6ae65546adaddf6ea55bb8b53c97dd580261..cc7fd957a307290293e4d32cb5076b6af9ba5bca 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "virtgpu_drv.h"
 
@@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 {
        struct drm_device *dev = vgdev->ddev;
@@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        if (vgdev->has_edid)
                drm_connector_attach_edid_property(connector);
 
-       drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
-                        DRM_MODE_ENCODER_VIRTUAL, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
        drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
        encoder->possible_crtcs = 1 << index;
 
index c1824bdf2418218793b04c0e11ab0e7fefdbc3fa..49bebdee6d91e3fcd37ff80b18062e10c28908c3 100644 (file)
@@ -218,26 +218,18 @@ struct virtio_gpu_fpriv {
        struct mutex context_lock;
 };
 
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 
-/* virtio_kms.c */
+/* virtgpu_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
 void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
-                         struct drm_device *dev,
-                         struct virtio_gpu_object_params *params,
-                         struct drm_gem_object **obj_p,
-                         uint32_t *handle_p);
+/* virtgpu_gem.c */
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
                               struct drm_file *file);
 void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
@@ -263,7 +255,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_object_array *objs);
 void virtio_gpu_array_put_free_work(struct work_struct *work);
 
-/* virtio vg */
+/* virtgpu_vq.c */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -287,10 +279,10 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-                            struct virtio_gpu_object *obj,
-                            struct virtio_gpu_mem_entry *ents,
-                            unsigned int nents);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+                             struct virtio_gpu_object *obj,
+                             struct virtio_gpu_mem_entry *ents,
+                             unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -343,17 +335,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
-/* virtio_gpu_display.c */
+/* virtgpu_display.c */
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
                                        enum drm_plane_type type,
                                        int index);
 
-/* virtio_gpu_fence.c */
+/* virtgpu_fence.c */
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
        struct virtio_gpu_device *vgdev);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -362,7 +354,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
                                    u64 last_seq);
 
-/* virtio_gpu_object */
+/* virtgpu_object.c */
 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size);
@@ -378,7 +370,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
        struct drm_device *dev, struct dma_buf_attachment *attach,
        struct sg_table *sgt);
 
-/* virgl debugfs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
 
 #endif
index 0d6152c99a27190f538be52eb777d7d4ce973cf8..1025658be4df23c8491cc7e2a6f11c3d4e6adc8d 100644 (file)
 
 #include "virtgpu_drv.h"
 
-int virtio_gpu_gem_create(struct drm_file *file,
-                         struct drm_device *dev,
-                         struct virtio_gpu_object_params *params,
-                         struct drm_gem_object **obj_p,
-                         uint32_t *handle_p)
+static int virtio_gpu_gem_create(struct drm_file *file,
+                                struct drm_device *dev,
+                                struct virtio_gpu_object_params *params,
+                                struct drm_gem_object **obj_p,
+                                uint32_t *handle_p)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_object *obj;
@@ -114,7 +114,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
        struct virtio_gpu_object_array *objs;
 
        if (!vgdev->has_virgl_3d)
-               return 0;
+               goto out_notify;
 
        objs = virtio_gpu_array_alloc(1);
        if (!objs)
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 
        virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
                                               objs);
+out_notify:
        virtio_gpu_notify(vgdev);
        return 0;
 }
index 336cc9143205de921c4307f076903530abe58e97..867c5e239d5536f5dfc9db65ed3f4337a6eeb070 100644 (file)
@@ -47,7 +47,6 @@ static void virtio_gpu_create_context(struct drm_device *dev,
        get_task_comm(dbgname, current);
        virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                                      strlen(dbgname), dbgname);
-       virtio_gpu_notify(vgdev);
        vfpriv->context_created = true;
 
 out_unlock:
index d9039bb7c5e3759d71e21373b127580d70554bf1..6ccbd01cd888c3daedaad691c83711530e53f2e3 100644 (file)
@@ -235,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                return ret;
        }
 
-       ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
-       if (ret != 0) {
-               virtio_gpu_free_object(&shmem_obj->base);
-               return ret;
-       }
+       virtio_gpu_object_attach(vgdev, bo, ents, nents);
 
-       virtio_gpu_notify(vgdev);
        *bo_ptr = bo;
        return 0;
 
index 73854915ec349b6b405cebd2813c2fd63c85711a..9e663a5d9952684f154730b83ed1680b24f2d50b 100644 (file)
@@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-                            struct virtio_gpu_object *obj,
-                            struct virtio_gpu_mem_entry *ents,
-                            unsigned int nents)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+                             struct virtio_gpu_object *obj,
+                             struct virtio_gpu_mem_entry *ents,
+                             unsigned int nents)
 {
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
-       return 0;
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
index 860de052e8209369e2680a8aac02135157b1d75b..1e8b2169d834123810e74d3a628118324d28e2bf 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -34,7 +35,7 @@
 
 static struct vkms_device *vkms_device;
 
-bool enable_cursor;
+bool enable_cursor = true;
 module_param_named(enable_cursor, enable_cursor, bool, 0444);
 MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
 
@@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev)
        platform_device_unregister(vkms->platform);
        drm_atomic_helper_shutdown(&vkms->drm);
        drm_mode_config_cleanup(&vkms->drm);
-       drm_dev_fini(&vkms->drm);
        destroy_workqueue(vkms->output.composer_workq);
 }
 
@@ -158,13 +158,14 @@ static int __init vkms_init(void)
                           &vkms_device->platform->dev);
        if (ret)
                goto out_unregister;
+       drmm_add_final_kfree(&vkms_device->drm, vkms_device);
 
        ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
                                           DMA_BIT_MASK(64));
 
        if (ret) {
                DRM_ERROR("Could not initialize DMA support\n");
-               goto out_fini;
+               goto out_put;
        }
 
        vkms_device->drm.irq_enabled = true;
@@ -172,25 +173,25 @@ static int __init vkms_init(void)
        ret = drm_vblank_init(&vkms_device->drm, 1);
        if (ret) {
                DRM_ERROR("Failed to vblank\n");
-               goto out_fini;
+               goto out_put;
        }
 
        ret = vkms_modeset_init(vkms_device);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        ret = drm_dev_register(&vkms_device->drm, 0);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        return 0;
 
-out_fini:
-       drm_dev_fini(&vkms_device->drm);
+out_put:
+       drm_dev_put(&vkms_device->drm);
+       return ret;
 
 out_unregister:
        platform_device_unregister(vkms_device->platform);
-
 out_free:
        kfree(vkms_device);
        return ret;
@@ -205,8 +206,6 @@ static void __exit vkms_exit(void)
 
        drm_dev_unregister(&vkms_device->drm);
        drm_dev_put(&vkms_device->drm);
-
-       kfree(vkms_device);
 }
 
 module_init(vkms_init);
index eda04ffba7b1f15ad3321c607fac524092a9c92d..f4036bb0b9a89a51c05acdb1df2c0041af47a4ff 100644 (file)
@@ -117,11 +117,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
                                  enum drm_plane_type type, int index);
 
 /* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
-                                      struct drm_file *file,
-                                      u32 *handle,
-                                      u64 size);
-
 vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
 
 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
index 2e01186fb943b897b7b40dd29176465d2b85a1e7..c541fec575665b3414a437911a316457a916c6e1 100644 (file)
@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
        return ret;
 }
 
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
-                                      struct drm_file *file,
-                                      u32 *handle,
-                                      u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+                                             struct drm_file *file,
+                                             u32 *handle,
+                                             u64 size)
 {
        struct vkms_gem_object *obj;
        int ret;
@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                return ERR_CAST(obj);
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
-       drm_gem_object_put_unlocked(&obj->gem);
        if (ret)
                return ERR_PTR(ret);
 
@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
        args->size = gem_obj->size;
        args->pitch = pitch;
 
+       drm_gem_object_put_unlocked(gem_obj);
+
        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
 
        return 0;
index fb1941a6522cfea38d8d58b419e66e1e57af5082..85afb77e97f0e6a6d9bfd8ee985c29e1b1b845eb 100644 (file)
@@ -3,6 +3,7 @@
 #include "vkms_drv.h"
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 static void vkms_connector_destroy(struct drm_connector *connector)
 {
@@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vkms_conn_get_modes(struct drm_connector *connector)
 {
        int count;
@@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 
        drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
 
-       ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
-                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to init encoder\n");
                goto err_encoder;
index 374142018171c98c62a4a78aad5890d6707401cf..1fd458e877caa38e9aaed8c18a64bb95ec7894ac 100644 (file)
@@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev)
        drm_atomic_helper_shutdown(dev);
        drm_mode_config_cleanup(dev);
 
-       drm_dev_fini(dev);
-       kfree(dev);
-
        if (front_info->cfg.be_alloc)
                xenbus_switch_state(front_info->xb_dev,
                                    XenbusStateInitialising);
@@ -561,6 +558,7 @@ fail_register:
 fail_modeset:
        drm_kms_helper_poll_fini(drm_dev);
        drm_mode_config_cleanup(drm_dev);
+       drm_dev_put(drm_dev);
 fail:
        kfree(drm_info);
        return ret;
index b98a1420dcd38a97b1e893d9d716f190c6f4c734..76a16d997a23b221b8b57af29a316bfcd840e6a1 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <sound/hdmi-codec.h>
 
@@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
        .mode_set = zx_hdmi_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_hdmi *hdmi = to_zx_hdmi(connector);
@@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
 
        encoder->possible_crtcs = VOU_CRTC_MASK;
 
-       drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
index c598b7daf1f18f1d4d4d7f7e576070b0e479cc82..d8a89ba383bc870030bca61886fdcd4be1db2353 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "zx_drm_drv.h"
 #include "zx_tvenc_regs.h"
@@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
        .mode_set = zx_tvenc_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_tvenc *tvenc = to_zx_tvenc(connector);
@@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
         */
        encoder->possible_crtcs = BIT(1);
 
-       drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
-                        DRM_MODE_ENCODER_TVDAC, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
        drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
 
        connector->interlace_allowed = true;
index c4fa3bbaba7808574ebcb902bea5daf5884c810d..a7ed7f5ca8370ea3819770beea1ffad5bdb7e566 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "zx_drm_drv.h"
 #include "zx_vga_regs.h"
@@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
        .disable = zx_vga_encoder_disable,
 };
 
-static const struct drm_encoder_funcs zx_vga_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_vga_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_vga *vga = to_zx_vga(connector);
@@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
 
        encoder->possible_crtcs = VOU_CRTC_MASK;
 
-       ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs,
-                              DRM_MODE_ENCODER_DAC, NULL);
+       ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
                return ret;
index 0370364169c4ec2ebc09218395f41615b9f49cd5..501c43c5851dcc503edaff6c19cfc5b0d4df4a66 100644 (file)
@@ -839,6 +839,9 @@ void vmbus_initiate_unload(bool crash)
 {
        struct vmbus_channel_message_header hdr;
 
+       if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
+               return;
+
        /* Pre-Win2012R2 hosts don't support reconnect */
        if (vmbus_proto_version < VERSION_WIN8_1)
                return;
index 8a28785735820b61f6a85adca84f61022c133f3a..ccf752b6659a773584f97b133f0cb1155a1bcfc4 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "hyperv_vmbus.h"
 
-struct dentry *hv_debug_root;
+static struct dentry *hv_debug_root;
 
 static int hv_debugfs_delay_get(void *data, u64 *val)
 {
index f5fa3b3c9baf762c8c02422fbffaa88d859e865b..70b30e223a578be42378342891541b560bae627b 100644 (file)
@@ -292,7 +292,7 @@ struct vmbus_msginfo {
        struct list_head msglist_entry;
 
        /* The message itself */
-       unsigned char msg[0];
+       unsigned char msg[];
 };
 
 
index 029378c27421d1b30a168dbe25c1ba13c4c8de57..a68bce4d0ddbe925beabc2ca6a3499c6e25ea202 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kdebug.h>
 #include <linux/efi.h>
 #include <linux/random.h>
+#include <linux/kernel.h>
 #include <linux/syscore_ops.h>
 #include <clocksource/hyperv_timer.h>
 #include "hyperv_vmbus.h"
@@ -48,14 +49,35 @@ static int hyperv_cpuhp_online;
 
 static void *hv_panic_page;
 
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+static int hyperv_report_reg(void)
+{
+       return !sysctl_record_panic_msg || !hv_panic_page;
+}
+
 static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
 {
        struct pt_regs *regs;
 
-       regs = current_pt_regs();
+       vmbus_initiate_unload(true);
 
-       hyperv_report_panic(regs, val);
+       /*
+        * Hyper-V should be notified only once about a panic.  If we will be
+        * doing hyperv_report_panic_msg() later with kmsg data, don't do
+        * the notification here.
+        */
+       if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
+           && hyperv_report_reg()) {
+               regs = current_pt_regs();
+               hyperv_report_panic(regs, val, false);
+       }
        return NOTIFY_DONE;
 }
 
@@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
        struct die_args *die = (struct die_args *)args;
        struct pt_regs *regs = die->regs;
 
-       hyperv_report_panic(regs, val);
+       /*
+        * Hyper-V should be notified only once about a panic.  If we will be
+        * doing hyperv_report_panic_msg() later with kmsg data, don't do
+        * the notification here.
+        */
+       if (hyperv_report_reg())
+               hyperv_report_panic(regs, val, true);
        return NOTIFY_DONE;
 }
 
@@ -1252,13 +1280,6 @@ static void vmbus_isr(void)
        add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
 }
 
-/*
- * Boolean to control whether to report panic messages over Hyper-V.
- *
- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
- */
-static int sysctl_record_panic_msg = 1;
-
 /*
  * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
  * buffer and call into Hyper-V to transfer the data.
@@ -1382,19 +1403,29 @@ static int vmbus_bus_init(void)
                        hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
                        if (hv_panic_page) {
                                ret = kmsg_dump_register(&hv_kmsg_dumper);
-                               if (ret)
+                               if (ret) {
                                        pr_err("Hyper-V: kmsg dump register "
                                                "error 0x%x\n", ret);
+                                       hv_free_hyperv_page(
+                                           (unsigned long)hv_panic_page);
+                                       hv_panic_page = NULL;
+                               }
                        } else
                                pr_err("Hyper-V: panic message page memory "
                                        "allocation failed");
                }
 
                register_die_notifier(&hyperv_die_block);
-               atomic_notifier_chain_register(&panic_notifier_list,
-                                              &hyperv_panic_block);
        }
 
+       /*
+        * Always register the panic notifier because we need to unload
+        * the VMbus channel connection to prevent any VMbus
+        * activity after the VM panics.
+        */
+       atomic_notifier_chain_register(&panic_notifier_list,
+                              &hyperv_panic_block);
+
        vmbus_request_offers();
 
        return 0;
@@ -1407,7 +1438,6 @@ err_alloc:
        hv_remove_vmbus_irq();
 
        bus_unregister(&hv_bus);
-       hv_free_hyperv_page((unsigned long)hv_panic_page);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
        return ret;
@@ -2204,8 +2234,6 @@ static int vmbus_bus_suspend(struct device *dev)
 
        vmbus_initiate_unload(false);
 
-       vmbus_connection.conn_state = DISCONNECTED;
-
        /* Reset the event for the next resume. */
        reinit_completion(&vmbus_connection.ready_for_resume_event);
 
@@ -2289,7 +2317,6 @@ static void hv_kexec_handler(void)
 {
        hv_stimer_global_cleanup();
        vmbus_initiate_unload(false);
-       vmbus_connection.conn_state = DISCONNECTED;
        /* Make sure conn_state is set as hv_synic_cleanup checks for it */
        mb();
        cpuhp_remove_state(hyperv_cpuhp_online);
@@ -2306,7 +2333,6 @@ static void hv_crash_handler(struct pt_regs *regs)
         * doing the cleanup for current CPU only. This should be sufficient
         * for kdump.
         */
-       vmbus_connection.conn_state = DISCONNECTED;
        cpu = smp_processor_id();
        hv_stimer_cleanup(cpu);
        hv_synic_disable_regs(cpu);
index 05a30832c6ba0a3c62411cb79d5375d5ff8b00a8..4c62f900bf7e8f6ae246d5a8b8aacf3b7d65a719 100644 (file)
@@ -412,7 +412,7 @@ config SENSORS_DRIVETEMP
          hard disk drives.
 
          This driver can also be built as a module. If so, the module
-         will be called satatemp.
+         will be called drivetemp.
 
 config SENSORS_DS620
        tristate "Dallas Semiconductor DS620"
index 370d0c74eb012e38ebfcfffbafeeb7ce7f50705f..9179460c2d9d586e1358859a485234126aa7bc8c 100644 (file)
@@ -264,12 +264,18 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
                return err;
        switch (attr) {
        case hwmon_temp_input:
+               if (!temp_is_valid(buf[SCT_STATUS_TEMP]))
+                       return -ENODATA;
                *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
                break;
        case hwmon_temp_lowest:
+               if (!temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]))
+                       return -ENODATA;
                *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
                break;
        case hwmon_temp_highest:
+               if (!temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]))
+                       return -ENODATA;
                *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
                break;
        default:
index f2d81b0558e5695f871fff22a93c8ceaff902edd..e3f1ebee71306b6ee12f69a7ebf318027df7ef38 100644 (file)
@@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
        }
        data->config = config;
 
-       hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+       hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
                                                         data, &jc42_chip_info,
                                                         NULL);
        return PTR_ERR_OR_ZERO(hwmon_dev);
index 3f37d5d81fe40e8cdb3bb3f3bb17827fc9833ff4..9915578533bb9665df32ade0956d90e077fab531 100644 (file)
@@ -186,7 +186,7 @@ static long get_raw_temp(struct k10temp_data *data)
        return temp;
 }
 
-const char *k10temp_temp_label[] = {
+static const char *k10temp_temp_label[] = {
        "Tctl",
        "Tdie",
        "Tccd1",
@@ -199,12 +199,12 @@ const char *k10temp_temp_label[] = {
        "Tccd8",
 };
 
-const char *k10temp_in_label[] = {
+static const char *k10temp_in_label[] = {
        "Vcore",
        "Vsoc",
 };
 
-const char *k10temp_curr_label[] = {
+static const char *k10temp_curr_label[] = {
        "Icore",
        "Isoc",
 };
index 4d2315208bb5982f88ab04e489a39ef1baacd74d..0c622711ef7e0e926a20980961e98322d7663d9e 100644 (file)
 #define ISL68137_VOUT_AVS      0x30
 #define RAA_DMPVR2_READ_VMON   0xc8
 
-enum versions {
+enum chips {
        isl68137,
+       isl68220,
+       isl68221,
+       isl68222,
+       isl68223,
+       isl68224,
+       isl68225,
+       isl68226,
+       isl68227,
+       isl68229,
+       isl68233,
+       isl68239,
+       isl69222,
+       isl69223,
+       isl69224,
+       isl69225,
+       isl69227,
+       isl69228,
+       isl69234,
+       isl69236,
+       isl69239,
+       isl69242,
+       isl69243,
+       isl69247,
+       isl69248,
+       isl69254,
+       isl69255,
+       isl69256,
+       isl69259,
+       isl69260,
+       isl69268,
+       isl69269,
+       isl69298,
+       raa228000,
+       raa228004,
+       raa228006,
+       raa228228,
+       raa229001,
+       raa229004,
+};
+
+enum variants {
+       raa_dmpvr1_2rail,
        raa_dmpvr2_1rail,
        raa_dmpvr2_2rail,
        raa_dmpvr2_3rail,
@@ -186,7 +228,7 @@ static int isl68137_probe(struct i2c_client *client,
        memcpy(info, &raa_dmpvr_info, sizeof(*info));
 
        switch (id->driver_data) {
-       case isl68137:
+       case raa_dmpvr1_2rail:
                info->pages = 2;
                info->R[PSC_VOLTAGE_IN] = 3;
                info->func[0] &= ~PMBUS_HAVE_VMON;
@@ -224,11 +266,47 @@ static int isl68137_probe(struct i2c_client *client,
 }
 
 static const struct i2c_device_id raa_dmpvr_id[] = {
-       {"isl68137", isl68137},
-       {"raa_dmpvr2_1rail", raa_dmpvr2_1rail},
-       {"raa_dmpvr2_2rail", raa_dmpvr2_2rail},
-       {"raa_dmpvr2_3rail", raa_dmpvr2_3rail},
-       {"raa_dmpvr2_hv", raa_dmpvr2_hv},
+       {"isl68137", raa_dmpvr1_2rail},
+       {"isl68220", raa_dmpvr2_2rail},
+       {"isl68221", raa_dmpvr2_3rail},
+       {"isl68222", raa_dmpvr2_2rail},
+       {"isl68223", raa_dmpvr2_2rail},
+       {"isl68224", raa_dmpvr2_3rail},
+       {"isl68225", raa_dmpvr2_2rail},
+       {"isl68226", raa_dmpvr2_3rail},
+       {"isl68227", raa_dmpvr2_1rail},
+       {"isl68229", raa_dmpvr2_3rail},
+       {"isl68233", raa_dmpvr2_2rail},
+       {"isl68239", raa_dmpvr2_3rail},
+
+       {"isl69222", raa_dmpvr2_2rail},
+       {"isl69223", raa_dmpvr2_3rail},
+       {"isl69224", raa_dmpvr2_2rail},
+       {"isl69225", raa_dmpvr2_2rail},
+       {"isl69227", raa_dmpvr2_3rail},
+       {"isl69228", raa_dmpvr2_3rail},
+       {"isl69234", raa_dmpvr2_2rail},
+       {"isl69236", raa_dmpvr2_2rail},
+       {"isl69239", raa_dmpvr2_3rail},
+       {"isl69242", raa_dmpvr2_2rail},
+       {"isl69243", raa_dmpvr2_1rail},
+       {"isl69247", raa_dmpvr2_2rail},
+       {"isl69248", raa_dmpvr2_2rail},
+       {"isl69254", raa_dmpvr2_2rail},
+       {"isl69255", raa_dmpvr2_2rail},
+       {"isl69256", raa_dmpvr2_2rail},
+       {"isl69259", raa_dmpvr2_2rail},
+       {"isl69260", raa_dmpvr2_2rail},
+       {"isl69268", raa_dmpvr2_2rail},
+       {"isl69269", raa_dmpvr2_3rail},
+       {"isl69298", raa_dmpvr2_2rail},
+
+       {"raa228000", raa_dmpvr2_hv},
+       {"raa228004", raa_dmpvr2_hv},
+       {"raa228006", raa_dmpvr2_hv},
+       {"raa228228", raa_dmpvr2_2rail},
+       {"raa229001", raa_dmpvr2_2rail},
+       {"raa229004", raa_dmpvr2_2rail},
        {}
 };
 
index 20ef63820c776ef5b0c58eb048e35b72af10d457..f5c00f903df3833a08b4bc90b8456d7cbcf52d6d 100644 (file)
@@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
        struct altr_i2c_dev *idev = NULL;
        struct resource *res;
        int irq, ret;
-       u32 val;
 
        idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
        if (!idev)
@@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
        init_completion(&idev->msg_complete);
        spin_lock_init(&idev->lock);
 
-       val = device_property_read_u32(idev->dev, "fifo-size",
+       ret = device_property_read_u32(idev->dev, "fifo-size",
                                       &idev->fifo_size);
-       if (val) {
+       if (ret) {
                dev_err(&pdev->dev, "FIFO size set to default of %d\n",
                        ALTR_I2C_DFLT_FIFO_SZ);
                idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
        }
 
-       val = device_property_read_u32(idev->dev, "clock-frequency",
+       ret = device_property_read_u32(idev->dev, "clock-frequency",
                                       &idev->bus_clk_rate);
-       if (val) {
+       if (ret) {
                dev_err(&pdev->dev, "Default to 100kHz\n");
                idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;        /* default clock rate */
        }
index c98befe2a92e045c9733fa06b1e0aea48f8d2b78..5536673060cc637d505bcc15cee1c1127e721118 100644 (file)
@@ -354,10 +354,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        adap->dev.of_node = pdev->dev.of_node;
        adap->nr = -1;
 
-       dev_pm_set_driver_flags(&pdev->dev,
-                               DPM_FLAG_SMART_PREPARE |
-                               DPM_FLAG_SMART_SUSPEND |
-                               DPM_FLAG_LEAVE_SUSPENDED);
+       if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
+               dev_pm_set_driver_flags(&pdev->dev,
+                                       DPM_FLAG_SMART_PREPARE |
+                                       DPM_FLAG_LEAVE_SUSPENDED);
+       } else {
+               dev_pm_set_driver_flags(&pdev->dev,
+                                       DPM_FLAG_SMART_PREPARE |
+                                       DPM_FLAG_SMART_SUSPEND |
+                                       DPM_FLAG_LEAVE_SUSPENDED);
+       }
 
        /* The code below assumes runtime PM to be disabled. */
        WARN_ON(pm_runtime_enabled(&pdev->dev));
index 4c4d17ddc96b964125f89b49c13dd1f44f8bc457..8280ac7cc1b7df4fc2b902542027559405e4d55f 100644 (file)
@@ -996,14 +996,13 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
        do {
                u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
 
-               if (status) {
+               if (status)
                        tegra_i2c_isr(i2c_dev->irq, i2c_dev);
 
-                       if (completion_done(complete)) {
-                               s64 delta = ktime_ms_delta(ktimeout, ktime);
+               if (completion_done(complete)) {
+                       s64 delta = ktime_ms_delta(ktimeout, ktime);
 
-                               return msecs_to_jiffies(delta) ?: 1;
-                       }
+                       return msecs_to_jiffies(delta) ?: 1;
                }
 
                ktime = ktime_get();
@@ -1030,14 +1029,18 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
                disable_irq(i2c_dev->irq);
 
                /*
-                * There is a chance that completion may happen after IRQ
-                * synchronization, which is done by disable_irq().
+                * Under some rare circumstances (like running KASAN +
+                * NFS root) CPU, which handles interrupt, may stuck in
+                * uninterruptible state for a significant time.  In this
+                * case we will get timeout if I2C transfer is running on
+                * a sibling CPU, despite of IRQ being raised.
+                *
+                * In order to handle this rare condition, the IRQ status
+                * needs to be checked after timeout.
                 */
-               if (ret == 0 && completion_done(complete)) {
-                       dev_warn(i2c_dev->dev,
-                                "completion done after timeout\n");
-                       ret = 1;
-               }
+               if (ret == 0)
+                       ret = tegra_i2c_poll_completion_timeout(i2c_dev,
+                                                               complete, 0);
        }
 
        return ret;
@@ -1216,6 +1219,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
                time_left = tegra_i2c_wait_completion_timeout(
                                i2c_dev, &i2c_dev->dma_complete, xfer_time);
 
+               /*
+                * Synchronize DMA first, since dmaengine_terminate_sync()
+                * performs synchronization after the transfer's termination
+                * and we want to get a completion if transfer succeeded.
+                */
+               dmaengine_synchronize(i2c_dev->msg_read ?
+                                     i2c_dev->rx_dma_chan :
+                                     i2c_dev->tx_dma_chan);
+
                dmaengine_terminate_sync(i2c_dev->msg_read ?
                                         i2c_dev->rx_dma_chan :
                                         i2c_dev->tx_dma_chan);
index 5cc0b0ec5570bd62aa06544649f1f8375f15eb4b..a6691278206474374c68a5b8786d4cf48f92bd34 100644 (file)
@@ -2273,19 +2273,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap,
 }
 EXPORT_SYMBOL_GPL(i2c_new_scanned_device);
 
-struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
-                     struct i2c_board_info *info,
-                     unsigned short const *addr_list,
-                     int (*probe)(struct i2c_adapter *adap, unsigned short addr))
-{
-       struct i2c_client *client;
-
-       client = i2c_new_scanned_device(adap, info, addr_list, probe);
-       return IS_ERR(client) ? NULL : client;
-}
-EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-
 struct i2c_adapter *i2c_get_adapter(int nr)
 {
        struct i2c_adapter *adapter;
index eb9bce93cd057e84da94dbe73f092f497520998b..fd7c537fb42ac9f6a18c7da4b93cdb5ede304996 100644 (file)
@@ -416,7 +416,7 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = {
        .map                    = bcm7038_l1_map,
 };
 
-int __init bcm7038_l1_of_init(struct device_node *dn,
+static int __init bcm7038_l1_of_init(struct device_node *dn,
                              struct device_node *parent)
 {
        struct bcm7038_l1_chip *intc;
index 54d142ccc63a3fab9bc41419b4d89bd3862331d5..124251b0ccbae6bcb6b2d7e7b6c8b448b727733a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/dma-iommu.h>
 #include <linux/efi.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/log2.h>
@@ -3672,6 +3673,20 @@ out:
        return IRQ_SET_MASK_OK_DONE;
 }
 
+static void its_wait_vpt_parse_complete(void)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u64 val;
+
+       if (!gic_rdists->has_vpend_valid_dirty)
+               return;
+
+       WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
+                                               val,
+                                               !(val & GICR_VPENDBASER_Dirty),
+                                               10, 500));
+}
+
 static void its_vpe_schedule(struct its_vpe *vpe)
 {
        void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3702,6 +3717,8 @@ static void its_vpe_schedule(struct its_vpe *vpe)
        val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
        val |= GICR_VPENDBASER_Valid;
        gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3910,6 +3927,8 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
        val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 
        gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -4035,6 +4054,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
         * not on the host (since they can only be targetting a vPE).
         * Tell the kernel we've done whatever it asked for.
         */
+       irq_data_update_effective_affinity(d, mask_val);
        return IRQ_SET_MASK_OK;
 }
 
index 9dbc81b6f62e99dfa08ac4fe68054b0422959c86..d7006ef18a0da69be1ea8b7e78c1854616218fc5 100644 (file)
@@ -873,6 +873,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
        gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
        gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
                                           gic_data.rdists.has_rvpeid);
+       gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
 
        /* Detect non-sensical configurations */
        if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
@@ -893,10 +894,11 @@ static void gic_update_rdist_properties(void)
        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
                gic_data.ppi_nr = 0;
        pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
-       pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
-               !gic_data.rdists.has_vlpis ? "no " : "",
-               !gic_data.rdists.has_direct_lpi ? "no " : "",
-               !gic_data.rdists.has_rvpeid ? "no " : "");
+       if (gic_data.rdists.has_vlpis)
+               pr_info("GICv4 features: %s%s%s\n",
+                       gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+                       gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+                       gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
 }
 
 /* Check whether it's single security state view */
@@ -1620,6 +1622,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
        gic_data.rdists.has_rvpeid = true;
        gic_data.rdists.has_vlpis = true;
        gic_data.rdists.has_direct_lpi = true;
+       gic_data.rdists.has_vpend_valid_dirty = true;
 
        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
                err = -ENOMEM;
index 6b566bba263bdec3637aa7a8687cf2816e053971..ff7627b577726e6e887df1de1ef88137a5f5c4d0 100644 (file)
@@ -220,10 +220,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
        return 0;
 }
 
+static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+                                  unsigned int nr_irqs)
+{
+       platform_msi_domain_free(domain, virq, nr_irqs);
+}
+
 static const struct irq_domain_ops mbigen_domain_ops = {
        .translate      = mbigen_domain_translate,
        .alloc          = mbigen_irq_domain_alloc,
-       .free           = irq_domain_free_irqs_common,
+       .free           = mbigen_irq_domain_free,
 };
 
 static int mbigen_of_create_domain(struct platform_device *pdev,
index ccc7f823911bd3a12e4baa6eb1a8aeabbcaa455a..bc7aebcc96e9cdd1f31eecc435ca9454a40acacc 100644 (file)
@@ -144,12 +144,17 @@ struct meson_gpio_irq_controller {
 static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
                                       unsigned int reg, u32 mask, u32 val)
 {
+       unsigned long flags;
        u32 tmp;
 
+       spin_lock_irqsave(&ctl->lock, flags);
+
        tmp = readl_relaxed(ctl->base + reg);
        tmp &= ~mask;
        tmp |= val;
        writel_relaxed(tmp, ctl->base + reg);
+
+       spin_unlock_irqrestore(&ctl->lock, flags);
 }
 
 static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
@@ -196,14 +201,15 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
                               unsigned long  hwirq,
                               u32 **channel_hwirq)
 {
+       unsigned long flags;
        unsigned int idx;
 
-       spin_lock(&ctl->lock);
+       spin_lock_irqsave(&ctl->lock, flags);
 
        /* Find a free channel */
        idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
        if (idx >= NUM_CHANNEL) {
-               spin_unlock(&ctl->lock);
+               spin_unlock_irqrestore(&ctl->lock, flags);
                pr_err("No channel available\n");
                return -ENOSPC;
        }
@@ -211,6 +217,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
        /* Mark the channel as used */
        set_bit(idx, ctl->channel_map);
 
+       spin_unlock_irqrestore(&ctl->lock, flags);
+
        /*
         * Setup the mux of the channel to route the signal of the pad
         * to the appropriate input of the GIC
@@ -225,8 +233,6 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
         */
        *channel_hwirq = &(ctl->channel_irqs[idx]);
 
-       spin_unlock(&ctl->lock);
-
        pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
                 hwirq, idx, **channel_hwirq);
 
@@ -287,13 +293,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
                        val |= REG_EDGE_POL_LOW(params, idx);
        }
 
-       spin_lock(&ctl->lock);
-
        meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
                                   REG_EDGE_POL_MASK(params, idx), val);
 
-       spin_unlock(&ctl->lock);
-
        return 0;
 }
 
index 547045d89c4bddcc0d113785fab37c42dd19ec8f..91adf771f1859737157af823d231669e99b7f97b 100644 (file)
@@ -66,7 +66,7 @@ struct mvebu_icu_irq_data {
        unsigned int type;
 };
 
-DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
 
 static void mvebu_icu_init(struct mvebu_icu *icu,
                           struct mvebu_icu_msi_data *msi_data,
index c34fb3ae0ff8158890bd563649cd2a25e3718e1b..d0a71febdadce7e4abb8aafea33a4a2186c83d09 100644 (file)
@@ -56,7 +56,7 @@
 #define     CONTEXT_THRESHOLD          0x00
 #define     CONTEXT_CLAIM              0x04
 
-#define        PLIC_DISABLE_THRESHOLD          0xf
+#define        PLIC_DISABLE_THRESHOLD          0x7
 #define        PLIC_ENABLE_THRESHOLD           0
 
 struct plic_priv {
index 8f6e6b08eadf16f10b2a2444695987bd1db8b8bc..7e3ebf6ed2cd141ae6830d3c2dc659db874e3479 100644 (file)
@@ -37,6 +37,7 @@
 #define VINT_ENABLE_SET_OFFSET 0x0
 #define VINT_ENABLE_CLR_OFFSET 0x8
 #define VINT_STATUS_OFFSET     0x18
+#define VINT_STATUS_MASKED_OFFSET      0x20
 
 /**
  * struct ti_sci_inta_event_desc - Description of an event coming to
@@ -116,7 +117,7 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
        chained_irq_enter(irq_desc_get_chip(desc), desc);
 
        val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
-                           VINT_STATUS_OFFSET);
+                           VINT_STATUS_MASKED_OFFSET);
 
        for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
                virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
index e325e87c05934cd187a78b273688ea8b82baee55..11e8c7d8b6e89d361f8709251722f8d2e9c598c1 100644 (file)
@@ -743,10 +743,10 @@ check_send(struct isar_hw *isar, u8 rdm)
        }
 }
 
-const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200", "NODEF4",
+static const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200", "NODEF4",
                       "300", "600", "1200", "2400", "4800", "7200",
                       "9600nt", "9600t", "12000", "14400", "WRONG"};
-const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
+static const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
                       "Bell103", "V23", "Bell202", "V17", "V29", "V27ter"};
 
 static void
index 7ddb742de1fe54428c82effde48c654f27afcf6e..653923896205c2103b9d123ee6c68196c15a0bc2 100644 (file)
@@ -18,3 +18,5 @@ spi-nor-objs                  += winbond.o
 spi-nor-objs                   += xilinx.o
 spi-nor-objs                   += xmc.o
 obj-$(CONFIG_MTD_SPI_NOR)      += spi-nor.o
+
+obj-$(CONFIG_MTD_SPI_NOR)      += controllers/
index 2d0d91db0ddb40e1c9d5c271268f343181b4790c..5c444cd722bdcde79e2037ddea815f9ad2780e8f 100644 (file)
@@ -66,58 +66,6 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
        MIB_DESC(1, 0xb8, "RxArlDrop"),
 };
 
-static int
-mt7623_trgmii_write(struct mt7530_priv *priv,  u32 reg, u32 val)
-{
-       int ret;
-
-       ret =  regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
-       if (ret < 0)
-               dev_err(priv->dev,
-                       "failed to priv write register\n");
-       return ret;
-}
-
-static u32
-mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
-{
-       int ret;
-       u32 val;
-
-       ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
-       if (ret < 0) {
-               dev_err(priv->dev,
-                       "failed to priv read register\n");
-               return ret;
-       }
-
-       return val;
-}
-
-static void
-mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
-                 u32 mask, u32 set)
-{
-       u32 val;
-
-       val = mt7623_trgmii_read(priv, reg);
-       val &= ~mask;
-       val |= set;
-       mt7623_trgmii_write(priv, reg, val);
-}
-
-static void
-mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
-{
-       mt7623_trgmii_rmw(priv, reg, 0, val);
-}
-
-static void
-mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
-{
-       mt7623_trgmii_rmw(priv, reg, val, 0);
-}
-
 static int
 core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
 {
@@ -530,27 +478,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
                for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
                        mt7530_rmw(priv, MT7530_TRGMII_RD(i),
                                   RD_TAP_MASK, RD_TAP(16));
-       else
-               if (priv->id != ID_MT7621)
-                       mt7623_trgmii_set(priv, GSW_INTF_MODE,
-                                         INTF_MODE_TRGMII);
-
-       return 0;
-}
-
-static int
-mt7623_pad_clk_setup(struct dsa_switch *ds)
-{
-       struct mt7530_priv *priv = ds->priv;
-       int i;
-
-       for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
-               mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
-                                   TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
-       mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
-       mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
-
        return 0;
 }
 
@@ -846,8 +773,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
         */
        mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
                   MT7530_PORT_MATRIX_MODE);
-       mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
-                  VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+       mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+                  VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
+                  PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
 
        for (i = 0; i < MT7530_NUM_PORTS; i++) {
                if (dsa_is_user_port(ds, i) &&
@@ -863,8 +791,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
        if (all_user_ports_removed) {
                mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
                             PCR_MATRIX(dsa_user_ports(priv->ds)));
-               mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
-                            PORT_SPEC_TAG);
+               mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+                            | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
        }
 }
 
@@ -890,8 +818,9 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
        /* Set the port as a user port which is to be able to recognize VID
         * from incoming packets before fetching entry within the VLAN table.
         */
-       mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
-                  VLAN_ATTR(MT7530_VLAN_USER));
+       mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+                  VLAN_ATTR(MT7530_VLAN_USER) |
+                  PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
 }
 
 static void
@@ -1303,10 +1232,6 @@ mt7530_setup(struct dsa_switch *ds)
        dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
 
        if (priv->id == ID_MT7530) {
-               priv->ethernet = syscon_node_to_regmap(dn);
-               if (IS_ERR(priv->ethernet))
-                       return PTR_ERR(priv->ethernet);
-
                regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
                ret = regulator_enable(priv->core_pwr);
                if (ret < 0) {
@@ -1380,6 +1305,10 @@ mt7530_setup(struct dsa_switch *ds)
                        mt7530_cpu_port_enable(priv, i);
                else
                        mt7530_port_disable(ds, i);
+
+               /* Enable consistent egress tag */
+               mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+                          PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
        }
 
        /* Setup port 5 */
@@ -1468,14 +1397,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
                /* Setup TX circuit incluing relevant PAD and driving */
                mt7530_pad_clk_setup(ds, state->interface);
 
-               if (priv->id == ID_MT7530) {
-                       /* Setup RX circuit, relevant PAD and driving on the
-                        * host which must be placed after the setup on the
-                        * device side is all finished.
-                        */
-                       mt7623_pad_clk_setup(ds);
-               }
-
                priv->p6_interface = state->interface;
                break;
        default:
index ef9b52f3152b6e9db072e770e2da85062ef1cdd2..979bb6374678fda639e2da2b732d707145e6ee6b 100644 (file)
@@ -172,9 +172,16 @@ enum mt7530_port_mode {
 /* Register for port vlan control */
 #define MT7530_PVC_P(x)                        (0x2010 + ((x) * 0x100))
 #define  PORT_SPEC_TAG                 BIT(5)
+#define  PVC_EG_TAG(x)                 (((x) & 0x7) << 8)
+#define  PVC_EG_TAG_MASK               PVC_EG_TAG(7)
 #define  VLAN_ATTR(x)                  (((x) & 0x3) << 6)
 #define  VLAN_ATTR_MASK                        VLAN_ATTR(3)
 
+enum mt7530_vlan_port_eg_tag {
+       MT7530_VLAN_EG_DISABLED = 0,
+       MT7530_VLAN_EG_CONSISTENT = 1,
+};
+
 enum mt7530_vlan_port_attr {
        MT7530_VLAN_USER = 0,
        MT7530_VLAN_TRANSPARENT = 3,
@@ -277,7 +284,6 @@ enum mt7530_vlan_port_attr {
 
 /* Registers for TRGMII on the both side */
 #define MT7530_TRGMII_RCK_CTRL         0x7a00
-#define GSW_TRGMII_RCK_CTRL            0x300
 #define  RX_RST                                BIT(31)
 #define  RXC_DQSISEL                   BIT(30)
 #define  DQSI1_TAP_MASK                        (0x7f << 8)
@@ -286,31 +292,24 @@ enum mt7530_vlan_port_attr {
 #define  DQSI0_TAP(x)                  ((x) & 0x7f)
 
 #define MT7530_TRGMII_RCK_RTT          0x7a04
-#define GSW_TRGMII_RCK_RTT             0x304
 #define  DQS1_GATE                     BIT(31)
 #define  DQS0_GATE                     BIT(30)
 
 #define MT7530_TRGMII_RD(x)            (0x7a10 + (x) * 8)
-#define GSW_TRGMII_RD(x)               (0x310 + (x) * 8)
 #define  BSLIP_EN                      BIT(31)
 #define  EDGE_CHK                      BIT(30)
 #define  RD_TAP_MASK                   0x7f
 #define  RD_TAP(x)                     ((x) & 0x7f)
 
-#define GSW_TRGMII_TXCTRL              0x340
 #define MT7530_TRGMII_TXCTRL           0x7a40
 #define  TRAIN_TXEN                    BIT(31)
 #define  TXC_INV                       BIT(30)
 #define  TX_RST                                BIT(28)
 
 #define MT7530_TRGMII_TD_ODT(i)                (0x7a54 + 8 * (i))
-#define GSW_TRGMII_TD_ODT(i)           (0x354 + 8 * (i))
 #define  TD_DM_DRVP(x)                 ((x) & 0xf)
 #define  TD_DM_DRVN(x)                 (((x) & 0xf) << 4)
 
-#define GSW_INTF_MODE                  0x390
-#define  INTF_MODE_TRGMII              BIT(1)
-
 #define MT7530_TRGMII_TCK_CTRL         0x7a78
 #define  TCK_TAP(x)                    (((x) & 0xf) << 8)
 
@@ -443,7 +442,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
  * @ds:                        The pointer to the dsa core structure
  * @bus:               The bus used for the device and built-in PHY
  * @rstc:              The pointer to reset control used by MCM
- * @ethernet:          The regmap used for access TRGMII-based registers
  * @core_pwr:          The power supplied into the core
  * @io_pwr:            The power supplied into the I/O
  * @reset:             The descriptor for GPIO line tied to its reset pin
@@ -460,7 +458,6 @@ struct mt7530_priv {
        struct dsa_switch       *ds;
        struct mii_bus          *bus;
        struct reset_control    *rstc;
-       struct regmap           *ethernet;
        struct regulator        *core_pwr;
        struct regulator        *io_pwr;
        struct gpio_desc        *reset;
index 221593261e8feb0a8c2c485da18fc8b4cbc0bb9a..dd8a5666a584f54141e4cd745c421139521d76bd 100644 (file)
@@ -709,7 +709,8 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if (!mv88e6xxx_port_ppu_updates(chip, port) && ops->port_set_link)
+       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+            mode == MLO_AN_FIXED) && ops->port_set_link)
                err = ops->port_set_link(chip, port, LINK_FORCED_DOWN);
        mv88e6xxx_reg_unlock(chip);
 
@@ -731,7 +732,7 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if (!mv88e6xxx_port_ppu_updates(chip, port)) {
+       if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
                 * while we're bringing the port up, how is the exclusivity
index 79ca3aadb86410728a84eb2e32953a7b6acc6de8..d0a3764ff0cf83249f60c0b68dc6cde48f17295d 100644 (file)
@@ -46,11 +46,8 @@ static int felix_fdb_add(struct dsa_switch *ds, int port,
                         const unsigned char *addr, u16 vid)
 {
        struct ocelot *ocelot = ds->priv;
-       bool vlan_aware;
 
-       vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
-
-       return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
+       return ocelot_fdb_add(ocelot, port, addr, vid);
 }
 
 static int felix_fdb_del(struct dsa_switch *ds, int port,
index b71f9b04a51e187ce96fb4073287bc4393901cd9..a87264f95f1a49b39211690da0db147d571d2bcb 100644 (file)
@@ -514,7 +514,7 @@ static void xgbe_isr_task(unsigned long data)
                                xgbe_disable_rx_tx_ints(pdata);
 
                                /* Turn on polling */
-                               __napi_schedule_irqoff(&pdata->napi);
+                               __napi_schedule(&pdata->napi);
                        }
                } else {
                        /* Don't clear Rx/Tx status if doing per channel DMA
index 239f678a94ed5817af132825078838fb883564e0..2a3480fc1d91493dd3813b28e585ef571c942f4b 100644 (file)
@@ -3742,7 +3742,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
                 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              &param, &val);
-       if (ret < 0)
+       if (ret)
                return ret;
        *phy_fw_ver = val;
        return 0;
index 48ea658aa1a6d74a4e27759f673800f80d7a86c3..15efc294f51356b55eedfee9f45dad1f7a86a916 100644 (file)
@@ -1277,7 +1277,7 @@ static const struct net_device_ops tulip_netdev_ops = {
 #endif
 };
 
-const struct pci_device_id early_486_chipsets[] = {
+static const struct pci_device_id early_486_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
        { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
        { },
index bd898f5b4da53e0412ce941ceb00f83559356d18..e74dd1f86bbae8ef45eaca6a71c80f5324f9f5c5 100644 (file)
@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
        struct  sk_buff *rx_skbuff[RX_RING_SIZE];
 };
 
+struct fec_stop_mode_gpr {
+       struct regmap *gpr;
+       u8 reg;
+       u8 bit;
+};
+
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
  * cur_rx and cur_tx point to the currently available buffer.
@@ -562,6 +568,7 @@ struct fec_enet_private {
        int hwts_tx_en;
        struct delayed_work time_keep;
        struct regulator *reg_phy;
+       struct fec_stop_mode_gpr stop_gpr;
 
        unsigned int tx_align;
        unsigned int rx_align;
index c1c267b61647af88c36a73304a21770fee1df310..dc6f8763a5d4062c33bd553ce0899079833113be 100644 (file)
@@ -62,6 +62,8 @@
 #include <linux/if_vlan.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 #include <soc/imx/cpuidle.h>
 
 #include <asm/cacheflush.h>
@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 #define FEC_ENET_OPD_V 0xFFF0
 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
 
+struct fec_devinfo {
+       u32 quirks;
+       u8 stop_gpr_reg;
+       u8 stop_gpr_bit;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+       .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+                 FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+       .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+                 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+                 FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+                 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+                 FEC_QUIRK_HAS_RACC,
+       .stop_gpr_reg = 0x34,
+       .stop_gpr_bit = 27,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+                 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+                 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+                 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+                 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+                 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+                 FEC_QUIRK_HAS_COALESCE,
+};
+
 static struct platform_device_id fec_devtype[] = {
        {
                /* keep it for coldfire */
@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
-                              FEC_QUIRK_HAS_FRREG,
+               .driver_data = (kernel_ulong_t)&fec_imx25_info,
        }, {
                .name = "imx27-fec",
-               .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+               .driver_data = (kernel_ulong_t)&fec_imx27_info,
        }, {
                .name = "imx28-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
-                               FEC_QUIRK_HAS_FRREG,
+               .driver_data = (kernel_ulong_t)&fec_imx28_info,
        }, {
                .name = "imx6q-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
-                               FEC_QUIRK_HAS_RACC,
+               .driver_data = (kernel_ulong_t)&fec_imx6q_info,
        }, {
                .name = "mvf600-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+               .driver_data = (kernel_ulong_t)&fec_mvf600_info,
        }, {
                .name = "imx6sx-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
-                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
-                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+               .driver_data = (kernel_ulong_t)&fec_imx6x_info,
        }, {
                .name = "imx6ul-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
-                               FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
-                               FEC_QUIRK_HAS_COALESCE,
+               .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
        }, {
                /* sentinel */
        }
@@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev)
 
 }
 
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+       struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+       struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+       if (stop_gpr->gpr) {
+               if (enabled)
+                       regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+                                          BIT(stop_gpr->bit),
+                                          BIT(stop_gpr->bit));
+               else
+                       regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+                                          BIT(stop_gpr->bit), 0);
+       } else if (pdata && pdata->sleep_mode_enable) {
+               pdata->sleep_mode_enable(enabled);
+       }
+}
+
 static void
 fec_stop(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
        u32 val;
 
@@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev)
                val = readl(fep->hwp + FEC_ECNTRL);
                val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
                writel(val, fep->hwp + FEC_ECNTRL);
-
-               if (pdata && pdata->sleep_mode_enable)
-                       pdata->sleep_mode_enable(true);
+               fec_enet_stop_mode(fep, true);
        }
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
@@ -3398,6 +3451,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
        return irq_cnt;
 }
 
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+                                  struct fec_devinfo *dev_info,
+                                  struct device_node *np)
+{
+       struct device_node *gpr_np;
+       int ret = 0;
+
+       if (!dev_info)
+               return 0;
+
+       gpr_np = of_parse_phandle(np, "gpr", 0);
+       if (!gpr_np)
+               return 0;
+
+       fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+       if (IS_ERR(fep->stop_gpr.gpr)) {
+               dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+               ret = PTR_ERR(fep->stop_gpr.gpr);
+               fep->stop_gpr.gpr = NULL;
+               goto out;
+       }
+
+       fep->stop_gpr.reg = dev_info->stop_gpr_reg;
+       fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+
+out:
+       of_node_put(gpr_np);
+
+       return ret;
+}
+
 static int
 fec_probe(struct platform_device *pdev)
 {
@@ -3413,6 +3497,7 @@ fec_probe(struct platform_device *pdev)
        int num_rx_qs;
        char irq_name[8];
        int irq_cnt;
+       struct fec_devinfo *dev_info;
 
        fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
 
@@ -3430,7 +3515,9 @@ fec_probe(struct platform_device *pdev)
        of_id = of_match_device(fec_dt_ids, &pdev->dev);
        if (of_id)
                pdev->id_entry = of_id->data;
-       fep->quirks = pdev->id_entry->driver_data;
+       dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+       if (dev_info)
+               fep->quirks = dev_info->quirks;
 
        fep->netdev = ndev;
        fep->num_rx_queues = num_rx_qs;
@@ -3464,6 +3551,10 @@ fec_probe(struct platform_device *pdev)
        if (of_get_property(np, "fsl,magic-packet", NULL))
                fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
 
+       ret = fec_enet_init_stop_mode(fep, dev_info, np);
+       if (ret)
+               goto failed_stop_mode;
+
        phy_node = of_parse_phandle(np, "phy-handle", 0);
        if (!phy_node && of_phy_is_fixed_link(np)) {
                ret = of_phy_register_fixed_link(np);
@@ -3632,6 +3723,7 @@ failed_clk:
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(phy_node);
+failed_stop_mode:
 failed_phy:
        dev_id--;
 failed_ioremap:
@@ -3709,7 +3801,6 @@ static int __maybe_unused fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
-       struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        int ret;
        int val;
 
@@ -3727,8 +3818,8 @@ static int __maybe_unused fec_resume(struct device *dev)
                        goto failed_clk;
                }
                if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
-                       if (pdata && pdata->sleep_mode_enable)
-                               pdata->sleep_mode_enable(false);
+                       fec_enet_stop_mode(fep, false);
+
                        val = readl(fep->hwp + FEC_ECNTRL);
                        val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
                        writel(val, fep->hwp + FEC_ECNTRL);
index 5be61f73b6abb79268e6b5e85a2f8f3c5c382831..51889770958d841f93c130e045fc10eca45c6cbc 100644 (file)
@@ -5383,7 +5383,7 @@ static int __init mvneta_driver_init(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
                                      mvneta_cpu_online,
                                      mvneta_cpu_down_prepare);
        if (ret < 0)
index 8d28f90acfe76d5a79130ba62b1578dce95b05be..09047109d0daac60bc85e6f64c5bce3cb91764d8 100644 (file)
@@ -65,6 +65,17 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
        return __raw_readl(eth->base + reg);
 }
 
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+       u32 val;
+
+       val = mtk_r32(eth, reg);
+       val &= ~mask;
+       val |= set;
+       mtk_w32(eth, val, reg);
+       return val;
+}
+
 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
 {
        unsigned long t_start = jiffies;
@@ -193,7 +204,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
        struct mtk_mac *mac = container_of(config, struct mtk_mac,
                                           phylink_config);
        struct mtk_eth *eth = mac->hw;
-       u32 mcr_cur, mcr_new, sid;
+       u32 mcr_cur, mcr_new, sid, i;
        int val, ge_mode, err;
 
        /* MT76x8 has no hardware settings between for the MAC */
@@ -255,6 +266,17 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
                                    PHY_INTERFACE_MODE_TRGMII)
                                        mtk_gmac0_rgmii_adjust(mac->hw,
                                                               state->speed);
+
+                               /* mt7623_pad_clk_setup */
+                               for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+                                       mtk_w32(mac->hw,
+                                               TD_DM_DRVP(8) | TD_DM_DRVN(8),
+                                               TRGMII_TD_ODT(i));
+
+                               /* Assert/release MT7623 RXC reset */
+                               mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+                                       TRGMII_RCK_CTRL);
+                               mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
                        }
                }
 
index 85830fe14a1b474613b6b2306b0cf28e49c59a97..454cfcd465fdafae17e4c5340d5b432431d6a6d5 100644 (file)
 #define DQSI0(x)               ((x << 0) & GENMASK(6, 0))
 #define DQSI1(x)               ((x << 8) & GENMASK(14, 8))
 #define RXCTL_DMWTLAT(x)       ((x << 16) & GENMASK(18, 16))
+#define RXC_RST                        BIT(31)
 #define RXC_DQSISEL            BIT(30)
 #define RCK_CTRL_RGMII_1000    (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
 #define RCK_CTRL_RGMII_10_100  RXCTL_DMWTLAT(2)
 
+#define NUM_TRGMII_CTRL                5
+
 /* TRGMII RXC control register */
 #define TRGMII_TCK_CTRL                0x10340
 #define TXCTL_DMWTLAT(x)       ((x << 16) & GENMASK(18, 16))
 #define TCK_CTRL_RGMII_1000    TXCTL_DMWTLAT(2)
 #define TCK_CTRL_RGMII_10_100  (TXC_INV | TXCTL_DMWTLAT(2))
 
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i)       (0x10354 + 8 * (i))
+#define  TD_DM_DRVP(x)         ((x) & 0xf)
+#define  TD_DM_DRVN(x)         (((x) & 0xf) << 4)
+
 /* TRGMII Interface mode register */
 #define INTF_MODE              0x10390
 #define TRGMII_INTF_DIS                BIT(0)
index bdeb291f6b679ac53b1fb4e67b944b7783054ed3..e94f0c4d74a724fb621cd102780e56e62ae87292 100644 (file)
@@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
        if (err)
                return err;
 
-       return mlx5_firmware_flash(dev, fw, extack);
+       err = mlx5_firmware_flash(dev, fw, extack);
+       release_firmware(fw);
+
+       return err;
 }
 
 static u8 mlx5_fw_ver_major(u32 version)
index ad3e3a65d403c1e776e18b97e569e9e180add40b..16416eaac39ef5ab1de154ae97c555ea238a2e31 100644 (file)
@@ -67,11 +67,9 @@ struct mlx5_ct_ft {
        struct nf_flowtable *nf_ft;
        struct mlx5_tc_ct_priv *ct_priv;
        struct rhashtable ct_entries_ht;
-       struct list_head ct_entries_list;
 };
 
 struct mlx5_ct_entry {
-       struct list_head list;
        u16 zone;
        struct rhash_head node;
        struct flow_rule *flow_rule;
@@ -617,8 +615,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
        if (err)
                goto err_insert;
 
-       list_add(&entry->list, &ft->ct_entries_list);
-
        return 0;
 
 err_insert:
@@ -646,7 +642,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
        WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
                                       &entry->node,
                                       cts_ht_params));
-       list_del(&entry->list);
        kfree(entry);
 
        return 0;
@@ -818,7 +813,6 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
        ft->zone = zone;
        ft->nf_ft = nf_ft;
        ft->ct_priv = ct_priv;
-       INIT_LIST_HEAD(&ft->ct_entries_list);
        refcount_set(&ft->refcount, 1);
 
        err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
@@ -847,12 +841,12 @@ err_init:
 }
 
 static void
-mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 {
-       struct mlx5_ct_entry *entry;
+       struct mlx5_tc_ct_priv *ct_priv = arg;
+       struct mlx5_ct_entry *entry = ptr;
 
-       list_for_each_entry(entry, &ft->ct_entries_list, list)
-               mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+       mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 }
 
 static void
@@ -863,9 +857,10 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 
        nf_flow_table_offload_del_cb(ft->nf_ft,
                                     mlx5_tc_ct_block_flow_offload, ft);
-       mlx5_tc_ct_flush_ft(ct_priv, ft);
        rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
-       rhashtable_destroy(&ft->ct_entries_ht);
+       rhashtable_free_and_destroy(&ft->ct_entries_ht,
+                                   mlx5_tc_ct_flush_ft_entry,
+                                   ct_priv);
        kfree(ft);
 }
 
index dd7f338425eb1e87328fc6ef6a7d75151e11cb69..f02150a97ac803e07fe7858d2c7333738e012b97 100644 (file)
@@ -5526,8 +5526,8 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
 #endif
-       mlx5e_devlink_port_unregister(priv);
        unregister_netdev(priv->netdev);
+       mlx5e_devlink_port_unregister(priv);
        mlx5e_detach(mdev, vpriv);
        mlx5e_destroy_netdev(priv);
 }
index 2a0243e4af757ea59df77ea4386004827989b0d6..55457f268495e68205fee6bdf9ae166b130c67ea 100644 (file)
@@ -2050,29 +2050,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct netdev_phys_item_id ppid = {};
        unsigned int dl_port_index = 0;
+       u16 pfnum;
 
        if (!is_devlink_port_supported(dev, rpriv))
                return 0;
 
        mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+       pfnum = PCI_FUNC(dev->pdev->devfn);
 
        if (rep->vport == MLX5_VPORT_UPLINK) {
                devlink_port_attrs_set(&rpriv->dl_port,
                                       DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                                      PCI_FUNC(dev->pdev->devfn), false, 0,
+                                      pfnum, false, 0,
                                       &ppid.id[0], ppid.id_len);
                dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
        } else if (rep->vport == MLX5_VPORT_PF) {
                devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
-                                             dev->pdev->devfn);
+                                             pfnum);
                dl_port_index = rep->vport;
        } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
                                            rpriv->rep->vport)) {
                devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
-                                             dev->pdev->devfn,
-                                             rep->vport - 1);
+                                             pfnum, rep->vport - 1);
                dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
        }
 
index 438128dde187d7ec58892c2879c6037f807f576f..a574c588269a0f0110eb05c745351c9a8c0c761e 100644 (file)
@@ -1343,7 +1343,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        if (err)
                return err;
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+           !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
                if (err)
@@ -3558,12 +3559,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
                               struct mlx5_esw_flow_attr *attr,
                               u32 *action)
 {
-       int nest_level = attr->parse_attr->filter_dev->lower_level;
        struct flow_action_entry vlan_act = {
                .id = FLOW_ACTION_VLAN_POP,
        };
-       int err = 0;
+       int nest_level, err = 0;
 
+       nest_level = attr->parse_attr->filter_dev->lower_level -
+                                               priv->netdev->lower_level;
        while (nest_level--) {
                err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
                if (err)
index 39f42f985fbd622d1592a5632615c40e49402bf3..c1848b57f61c8dc5c5822fdb1c453726f140fba2 100644 (file)
@@ -403,7 +403,6 @@ enum {
        MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
        MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
        MLX5_ESW_ATTR_FLAG_NO_IN_PORT    = BIT(2),
-       MLX5_ESW_ATTR_FLAG_HAIRPIN       = BIT(3),
 };
 
 struct mlx5_esw_flow_attr {
index f171eb2234b07e573ea49556582820d47ecf0ecd..b2e38e0cde976dd71534a0a70acc8397e7c188ea 100644 (file)
@@ -300,7 +300,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
        bool split = !!(attr->split_count);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;
-       bool hairpin = false;
        int j, i = 0;
 
        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
@@ -398,21 +397,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                goto err_esw_get;
        }
 
-       if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
+       if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
                rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
                                                     &flow_act, dest, i);
-               hairpin = true;
-       } else {
+       else
                rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
-       }
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                atomic64_inc(&esw->offloads.num_flows);
 
-       if (hairpin)
-               attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;
-
        return rule;
 
 err_add_rule:
@@ -501,7 +495,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 
        mlx5_del_flow_rules(rule);
 
-       if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
+       if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
                /* unref the term table */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (attr->dests[i].termtbl)
index fa1665caac462c3aae87996a5ed6d8ff864124be..f99e1752d4e5c087fe4b814cc3aebdf8a5ff0a22 100644 (file)
@@ -243,7 +243,7 @@ recover_from_sw_reset:
                if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
                        break;
 
-               cond_resched();
+               msleep(20);
        } while (!time_after(jiffies, end));
 
        if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
index b4731df186f497a3b7a58e53ec6e0b7cd81e3761..a8c48a4a708f27f954c3adc0309205f545653b2b 100644 (file)
@@ -183,44 +183,47 @@ static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
        ocelot_write(ocelot, val, ANA_VLANMASK);
 }
 
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
-                               bool vlan_aware)
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+                                      u16 vid)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
-       u32 val;
+       u32 val = 0;
 
-       if (vlan_aware)
-               val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
-                     ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
-       else
-               val = 0;
-       ocelot_rmw_gix(ocelot, val,
-                      ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
-                      ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
-                      ANA_PORT_VLAN_CFG, port);
+       if (ocelot_port->vid != vid) {
+               /* Always permit deleting the native VLAN (vid = 0) */
+               if (ocelot_port->vid && vid) {
+                       dev_err(ocelot->dev,
+                               "Port already has a native VLAN: %d\n",
+                               ocelot_port->vid);
+                       return -EBUSY;
+               }
+               ocelot_port->vid = vid;
+       }
+
+       ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+                      REW_PORT_VLAN_CFG_PORT_VID_M,
+                      REW_PORT_VLAN_CFG, port);
 
-       if (vlan_aware && !ocelot_port->vid)
+       if (ocelot_port->vlan_aware && !ocelot_port->vid)
                /* If port is vlan-aware and tagged, drop untagged and priority
                 * tagged frames.
                 */
                val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
                      ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
                      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
-       else
-               val = 0;
        ocelot_rmw_gix(ocelot, val,
                       ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
                       ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
                       ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
                       ANA_PORT_DROP_CFG, port);
 
-       if (vlan_aware) {
+       if (ocelot_port->vlan_aware) {
                if (ocelot_port->vid)
                        /* Tag all frames except when VID == DEFAULT_VLAN */
-                       val |= REW_TAG_CFG_TAG_CFG(1);
+                       val = REW_TAG_CFG_TAG_CFG(1);
                else
                        /* Tag all frames */
-                       val |= REW_TAG_CFG_TAG_CFG(3);
+                       val = REW_TAG_CFG_TAG_CFG(3);
        } else {
                /* Port tagging disabled. */
                val = REW_TAG_CFG_TAG_CFG(0);
@@ -228,31 +231,31 @@ void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
        ocelot_rmw_gix(ocelot, val,
                       REW_TAG_CFG_TAG_CFG_M,
                       REW_TAG_CFG, port);
+
+       return 0;
 }
-EXPORT_SYMBOL(ocelot_port_vlan_filtering);
 
-static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
-                                      u16 vid)
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+                               bool vlan_aware)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       u32 val;
 
-       if (ocelot_port->vid != vid) {
-               /* Always permit deleting the native VLAN (vid = 0) */
-               if (ocelot_port->vid && vid) {
-                       dev_err(ocelot->dev,
-                               "Port already has a native VLAN: %d\n",
-                               ocelot_port->vid);
-                       return -EBUSY;
-               }
-               ocelot_port->vid = vid;
-       }
+       ocelot_port->vlan_aware = vlan_aware;
 
-       ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
-                      REW_PORT_VLAN_CFG_PORT_VID_M,
-                      REW_PORT_VLAN_CFG, port);
+       if (vlan_aware)
+               val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+                     ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+       else
+               val = 0;
+       ocelot_rmw_gix(ocelot, val,
+                      ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+                      ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+                      ANA_PORT_VLAN_CFG, port);
 
-       return 0;
+       ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
 }
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
 
 /* Default vlan to clasify for untagged frames (may be zero) */
 static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
@@ -873,12 +876,12 @@ static void ocelot_get_stats64(struct net_device *dev,
 }
 
 int ocelot_fdb_add(struct ocelot *ocelot, int port,
-                  const unsigned char *addr, u16 vid, bool vlan_aware)
+                  const unsigned char *addr, u16 vid)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
 
        if (!vid) {
-               if (!vlan_aware)
+               if (!ocelot_port->vlan_aware)
                        /* If the bridge is not VLAN aware and no VID was
                         * provided, set it to pvid to ensure the MAC entry
                         * matches incoming untagged packets
@@ -905,7 +908,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        struct ocelot *ocelot = priv->port.ocelot;
        int port = priv->chip_port;
 
-       return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
+       return ocelot_fdb_add(ocelot, port, addr, vid);
 }
 
 int ocelot_fdb_del(struct ocelot *ocelot, int port,
@@ -1496,8 +1499,8 @@ static int ocelot_port_attr_set(struct net_device *dev,
                ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
-               priv->vlan_aware = attr->u.vlan_filtering;
-               ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
+               ocelot_port_vlan_filtering(ocelot, port,
+                                          attr->u.vlan_filtering);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
                ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -1868,7 +1871,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
                        } else {
                                err = ocelot_port_bridge_leave(ocelot, port,
                                                               info->upper_dev);
-                               priv->vlan_aware = false;
                        }
                }
                if (netif_is_lag_master(info->upper_dev)) {
index e34ef8380eb326b8315a5203d6932fa046cd9e47..641af929497fd24a3d2941b5d423a85a4fd61169 100644 (file)
@@ -56,8 +56,6 @@ struct ocelot_port_private {
        struct phy_device *phy;
        u8 chip_port;
 
-       u8 vlan_aware;
-
        struct phy *serdes;
 
        struct ocelot_port_tc tc;
index 0ec6b8e8b5494bb51e10a0a64e26476209c30fee..67e62603fe3bfbd498573f3530039d7fad0dc20c 100644 (file)
@@ -5155,7 +5155,7 @@ static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
 /* read mac entries from CAM */
 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
 {
-       u64 tmp64 = 0xffffffffffff0000ULL, val64;
+       u64 tmp64, val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
 
        /* read mac addr */
index 4b8a76098ca399d601150f8226aacc25b2de3096..5acf4f46c268b96f973ece41a36e6e185a10c704 100644 (file)
@@ -2127,6 +2127,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
        if (lif->registered)
                ionic_lif_set_netdev_info(lif);
 
+       ionic_rx_filter_replay(lif);
+
        if (netif_running(lif->netdev)) {
                err = ionic_txrx_alloc(lif);
                if (err)
@@ -2206,9 +2208,9 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
        if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
                cancel_work_sync(&lif->deferred.work);
                cancel_work_sync(&lif->tx_timeout_work);
+               ionic_rx_filters_deinit(lif);
        }
 
-       ionic_rx_filters_deinit(lif);
        if (lif->netdev->features & NETIF_F_RXHASH)
                ionic_lif_rss_deinit(lif);
 
@@ -2339,24 +2341,30 @@ static int ionic_station_set(struct ionic_lif *lif)
        err = ionic_adminq_post_wait(lif, &ctx);
        if (err)
                return err;
-
+       netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
+                  ctx.comp.lif_getattr.mac);
        if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
                return 0;
 
-       memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
-       addr.sa_family = AF_INET;
-       err = eth_prepare_mac_addr_change(netdev, &addr);
-       if (err) {
-               netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
-                           addr.sa_data, err);
-               return 0;
-       }
+       if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
+               memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+               addr.sa_family = AF_INET;
+               err = eth_prepare_mac_addr_change(netdev, &addr);
+               if (err) {
+                       netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
+                                   addr.sa_data, err);
+                       return 0;
+               }
 
-       netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
-                  netdev->dev_addr);
-       ionic_lif_addr(lif, netdev->dev_addr, false);
+               if (!is_zero_ether_addr(netdev->dev_addr)) {
+                       netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+                                  netdev->dev_addr);
+                       ionic_lif_addr(lif, netdev->dev_addr, false);
+               }
+
+               eth_commit_mac_addr_change(netdev, &addr);
+       }
 
-       eth_commit_mac_addr_change(netdev, &addr);
        netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
                   netdev->dev_addr);
        ionic_lif_addr(lif, netdev->dev_addr, true);
@@ -2421,9 +2429,11 @@ static int ionic_lif_init(struct ionic_lif *lif)
        if (err)
                goto err_out_notifyq_deinit;
 
-       err = ionic_rx_filters_init(lif);
-       if (err)
-               goto err_out_notifyq_deinit;
+       if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+               err = ionic_rx_filters_init(lif);
+               if (err)
+                       goto err_out_notifyq_deinit;
+       }
 
        err = ionic_station_set(lif);
        if (err)
index 7a093f148ee58a5296ed50f9657f7e8f127c89cf..80eeb7696e0144768994509c26c1ed8e55e4e9c4 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
 #include <linux/netdevice.h>
+#include <linux/dynamic_debug.h>
 #include <linux/etherdevice.h>
 
 #include "ionic.h"
@@ -17,17 +18,49 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
        devm_kfree(dev, f);
 }
 
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f)
+void ionic_rx_filter_replay(struct ionic_lif *lif)
 {
-       struct ionic_admin_ctx ctx = {
-               .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
-               .cmd.rx_filter_del = {
-                       .opcode = IONIC_CMD_RX_FILTER_DEL,
-                       .filter_id = cpu_to_le32(f->filter_id),
-               },
-       };
-
-       return ionic_adminq_post_wait(lif, &ctx);
+       struct ionic_rx_filter_add_cmd *ac;
+       struct ionic_admin_ctx ctx;
+       struct ionic_rx_filter *f;
+       struct hlist_head *head;
+       struct hlist_node *tmp;
+       unsigned int i;
+       int err;
+
+       ac = &ctx.cmd.rx_filter_add;
+
+       for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+               head = &lif->rx_filters.by_id[i];
+               hlist_for_each_entry_safe(f, tmp, head, by_id) {
+                       ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
+                       memcpy(ac, &f->cmd, sizeof(f->cmd));
+                       dev_dbg(&lif->netdev->dev, "replay filter command:\n");
+                       dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+                                        &ctx.cmd, sizeof(ctx.cmd), true);
+
+                       err = ionic_adminq_post_wait(lif, &ctx);
+                       if (err) {
+                               switch (le16_to_cpu(ac->match)) {
+                               case IONIC_RX_FILTER_MATCH_VLAN:
+                                       netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
+                                                   err,
+                                                   le16_to_cpu(ac->vlan.vlan));
+                                       break;
+                               case IONIC_RX_FILTER_MATCH_MAC:
+                                       netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
+                                                   err, ac->mac.addr);
+                                       break;
+                               case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+                                       netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
+                                                   err,
+                                                   le16_to_cpu(ac->vlan.vlan),
+                                                   ac->mac.addr);
+                                       break;
+                               }
+                       }
+               }
+       }
 }
 
 int ionic_rx_filters_init(struct ionic_lif *lif)
index b6aec9c1991814e5dfb2758504dd3c26a1813f68..cf8f4c0a961c763201d7f99c13aa35352195b383 100644 (file)
@@ -24,7 +24,7 @@ struct ionic_rx_filters {
 };
 
 void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f);
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f);
+void ionic_rx_filter_replay(struct ionic_lif *lif);
 int ionic_rx_filters_init(struct ionic_lif *lif);
 void ionic_rx_filters_deinit(struct ionic_lif *lif);
 int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
index e0212d2fc2a12c745d562b333b81688fe7227ecd..fa32cd5b418ef95c079190903193b118b1e406b9 100644 (file)
@@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
        switch (phymode) {
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
                break;
        case PHY_INTERFACE_MODE_MII:
index 7d40760e9ba887693e5b6b2a925eee73bd7ad92d..0e1ca2cba3c7c5220d8c24b62e835f64e5bdc2a2 100644 (file)
@@ -150,6 +150,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
        plat_dat->init = sun7i_gmac_init;
        plat_dat->exit = sun7i_gmac_exit;
        plat_dat->fix_mac_speed = sun7i_fix_speed;
+       plat_dat->tx_fifo_size = 4096;
+       plat_dat->rx_fifo_size = 16384;
 
        ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
        if (ret)
index f71c15c394927efcaf3d0beb3de52c3ff91ee7c1..2bf56733ba94cf447592080e67ae3b99939a90ee 100644 (file)
@@ -1372,7 +1372,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
 err:
        i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
        if (i) {
-               dev_err(dev, "failed to add free_tx_chns action %d", i);
+               dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
                return i;
        }
 
@@ -1481,7 +1481,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 err:
        i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
        if (i) {
-               dev_err(dev, "failed to add free_rx_chns action %d", i);
+               dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
                return i;
        }
 
@@ -1691,7 +1691,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
        ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
                                       ndev_priv->stats);
        if (ret) {
-               dev_err(dev, "failed to add percpu stat free action %d", ret);
+               dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
                return ret;
        }
 
index 55c9329a4b1db62112bdbff6b3f841e567f4cfbc..ed10818dd99f2a6e5bcb56a662638fc39ce1305e 100644 (file)
@@ -297,14 +297,13 @@ static void ipa_modem_crashed(struct ipa *ipa)
 
        ret = ipa_endpoint_modem_exception_reset_all(ipa);
        if (ret)
-               dev_err(dev, "error %d resetting exception endpoint",
-                       ret);
+               dev_err(dev, "error %d resetting exception endpoint\n", ret);
 
        ipa_endpoint_modem_pause_all(ipa, false);
 
        ret = ipa_modem_stop(ipa);
        if (ret)
-               dev_err(dev, "error %d stopping modem", ret);
+               dev_err(dev, "error %d stopping modem\n", ret);
 
        /* Now prepare for the next modem boot */
        ret = ipa_mem_zero_modem(ipa);
index 0d580d81d910f0e8802684e163f89a286223a64a..a183250ff66adc83cce7767dab9a1e35c57f0b3b 100644 (file)
@@ -3809,7 +3809,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
                             struct netlink_ext_ack *extack)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
-       struct macsec_tx_sa tx_sc;
+       struct macsec_tx_sc tx_sc;
        struct macsec_secy secy;
        int ret;
 
index 4714ca0e0d4b215a958cbc534f5bc59e9744fd4f..7fc8e10c5f33729964f167b178bbf95618b2f43f 100644 (file)
@@ -1263,6 +1263,30 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
        int lpa;
        int err;
 
+       if (!(status & MII_M1011_PHY_STATUS_RESOLVED)) {
+               phydev->link = 0;
+               return 0;
+       }
+
+       if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+               phydev->duplex = DUPLEX_FULL;
+       else
+               phydev->duplex = DUPLEX_HALF;
+
+       switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
+       case MII_M1011_PHY_STATUS_1000:
+               phydev->speed = SPEED_1000;
+               break;
+
+       case MII_M1011_PHY_STATUS_100:
+               phydev->speed = SPEED_100;
+               break;
+
+       default:
+               phydev->speed = SPEED_10;
+               break;
+       }
+
        if (!fiber) {
                err = genphy_read_lpa(phydev);
                if (err < 0)
@@ -1291,28 +1315,6 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
                }
        }
 
-       if (!(status & MII_M1011_PHY_STATUS_RESOLVED))
-               return 0;
-
-       if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
-               phydev->duplex = DUPLEX_FULL;
-       else
-               phydev->duplex = DUPLEX_HALF;
-
-       switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
-       case MII_M1011_PHY_STATUS_1000:
-               phydev->speed = SPEED_1000;
-               break;
-
-       case MII_M1011_PHY_STATUS_100:
-               phydev->speed = SPEED_100;
-               break;
-
-       default:
-               phydev->speed = SPEED_10;
-               break;
-       }
-
        return 0;
 }
 
index 7621badae64d6d2599a76ef03f46cb817524d046..95e3f4644aeb2047256716455ee49dfcfa91f772 100644 (file)
@@ -33,6 +33,8 @@
 #define MV_PHY_ALASKA_NBT_QUIRK_REV    (MARVELL_PHY_ID_88X3310 | 0xa)
 
 enum {
+       MV_PMA_FW_VER0          = 0xc011,
+       MV_PMA_FW_VER1          = 0xc012,
        MV_PMA_BOOT             = 0xc050,
        MV_PMA_BOOT_FATAL       = BIT(0),
 
@@ -73,7 +75,8 @@ enum {
 
        /* Vendor2 MMD registers */
        MV_V2_PORT_CTRL         = 0xf001,
-       MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
+       MV_V2_PORT_CTRL_SWRST   = BIT(15),
+       MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
        MV_V2_TEMP_CTRL         = 0xf08a,
        MV_V2_TEMP_CTRL_MASK    = 0xc000,
        MV_V2_TEMP_CTRL_SAMPLE  = 0x0000,
@@ -83,6 +86,8 @@ enum {
 };
 
 struct mv3310_priv {
+       u32 firmware_ver;
+
        struct device *hwmon_dev;
        char *hwmon_name;
 };
@@ -235,8 +240,17 @@ static int mv3310_power_down(struct phy_device *phydev)
 
 static int mv3310_power_up(struct phy_device *phydev)
 {
-       return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
-                                 MV_V2_PORT_CTRL_PWRDOWN);
+       struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+       int ret;
+
+       ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+                                MV_V2_PORT_CTRL_PWRDOWN);
+
+       if (priv->firmware_ver < 0x00030000)
+               return ret;
+
+       return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+                               MV_V2_PORT_CTRL_SWRST);
 }
 
 static int mv3310_reset(struct phy_device *phydev, u32 unit)
@@ -355,6 +369,22 @@ static int mv3310_probe(struct phy_device *phydev)
 
        dev_set_drvdata(&phydev->mdio.dev, priv);
 
+       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_FW_VER0);
+       if (ret < 0)
+               return ret;
+
+       priv->firmware_ver = ret << 16;
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_FW_VER1);
+       if (ret < 0)
+               return ret;
+
+       priv->firmware_ver |= ret;
+
+       phydev_info(phydev, "Firmware version %u.%u.%u.%u\n",
+                   priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255,
+                   (priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255);
+
        /* Powering down the port when not in use saves about 600mW */
        ret = mv3310_power_down(phydev);
        if (ret)
index 522760c8bca6ec3d1b90b99c1b8315bb3e0ac23a..7a4eb3f2cb743a007903edaf8362eaee75602870 100644 (file)
@@ -464,7 +464,7 @@ static struct class mdio_bus_class = {
 
 /**
  * mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
- * @mdio_bus_np: Pointer to the mii_bus.
+ * @mdio_name: The name of a mdiobus.
  *
  * Returns a reference to the mii_bus, or NULL if none found.  The
  * embedded struct device will have its reference count incremented,
index 05d20343b8161c722afb2d0f33bda1616fcdb32e..3a4d83fa52dca3c31245428dc0cba5171a641838 100644 (file)
@@ -1204,7 +1204,7 @@ static struct phy_driver ksphy_driver[] = {
        .driver_data    = &ksz9021_type,
        .probe          = kszphy_probe,
        .config_init    = ksz9131_config_init,
-       .read_status    = ksz9031_read_status,
+       .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
        .get_sset_count = kszphy_get_sset_count,
index 07476c6510f2f50f8185b2be401a5894e37350c8..44889eba1dbc73bdf2f9e5f77e39919be4a43b14 100644 (file)
@@ -1888,6 +1888,7 @@ drop:
 
        skb_reset_network_header(skb);
        skb_probe_transport_header(skb);
+       skb_record_rx_queue(skb, tfile->queue_index);
 
        if (skb_xdp) {
                struct bpf_prog *xdp_prog;
@@ -2459,6 +2460,7 @@ build:
        skb->protocol = eth_type_trans(skb, tun->dev);
        skb_reset_network_header(skb);
        skb_probe_transport_header(skb);
+       skb_record_rx_queue(skb, tfile->queue_index);
 
        if (skb_xdp) {
                err = do_xdp_generic(xdp_prog, skb);
@@ -2470,7 +2472,6 @@ build:
            !tfile->detached)
                rxhash = __skb_get_hash_symmetric(skb);
 
-       skb_record_rx_queue(skb, tfile->queue_index);
        netif_receive_skb(skb);
 
        /* No need for get_cpu_ptr() here since this function is
index 459b8d49c184220ec0d82cbcc548d571f8d94102..f9af55f3682def093b461eb33530bda4719d3f3f 100644 (file)
@@ -36,12 +36,13 @@ static inline int ath11k_thermal_register(struct ath11k_base *sc)
        return 0;
 }
 
-static inline void ath11k_thermal_unregister(struct ath11k *ar)
+static inline void ath11k_thermal_unregister(struct ath11k_base *sc)
 {
 }
 
 static inline int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
 {
+       return 0;
 }
 
 static inline void ath11k_thermal_event_temperature(struct ath11k *ar,
index 23627c953a5ebd440ac126d931a6ed7b6edd3f2c..436f501be937aef441592ceee86049e1e3ea043e 100644 (file)
@@ -729,9 +729,18 @@ static int brcmf_net_mon_stop(struct net_device *ndev)
        return err;
 }
 
+static netdev_tx_t brcmf_net_mon_start_xmit(struct sk_buff *skb,
+                                           struct net_device *ndev)
+{
+       dev_kfree_skb_any(skb);
+
+       return NETDEV_TX_OK;
+}
+
 static const struct net_device_ops brcmf_netdev_ops_mon = {
        .ndo_open = brcmf_net_mon_open,
        .ndo_stop = brcmf_net_mon_stop,
+       .ndo_start_xmit = brcmf_net_mon_start_xmit,
 };
 
 int brcmf_net_mon_attach(struct brcmf_if *ifp)
index 7fe8207db6ae9d7dd1c99c302116e7293fe36426..7c4b7c31d07ab88e86a41dc5e1491b6edff1bef5 100644 (file)
@@ -3669,9 +3669,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        }
 
        if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
-               hwname = kasprintf(GFP_KERNEL, "%.*s",
-                                  nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
-                                  (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+               hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+                                 nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+                                 GFP_KERNEL);
                if (!hwname)
                        return -ENOMEM;
                param.hwname = hwname;
@@ -3691,9 +3691,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
                idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
        } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
-               hwname = kasprintf(GFP_KERNEL, "%.*s",
-                                  nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
-                                  (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+               hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+                                 nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+                                 GFP_KERNEL);
                if (!hwname)
                        return -ENOMEM;
        } else
index e37c71495c0d31907f9fee039f394115cf108c87..1af87eb2e53ac90a0a9d30dd54cd1d9464528b82 100644 (file)
@@ -1338,22 +1338,17 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
        rtw_pci_link_cfg(rtwdev);
 }
 
-#ifdef CONFIG_PM
-static int rtw_pci_suspend(struct device *dev)
+static int __maybe_unused rtw_pci_suspend(struct device *dev)
 {
        return 0;
 }
 
-static int rtw_pci_resume(struct device *dev)
+static int __maybe_unused rtw_pci_resume(struct device *dev)
 {
        return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
-#define RTW_PM_OPS (&rtw_pm_ops)
-#else
-#define RTW_PM_OPS NULL
-#endif
 
 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 {
@@ -1582,7 +1577,7 @@ static struct pci_driver rtw_pci_driver = {
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
-       .driver.pm = RTW_PM_OPS,
+       .driver.pm = &rtw_pm_ops,
 };
 module_pci_driver(rtw_pci_driver);
 
index c9219fddf44bea12fe7f8e14dfc3b224631c7bbb..50bbe0edf538022c2f9f2f13546499fe4addf643 100644 (file)
@@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop(
 
        of_property_set_flag(new_prop, OF_DYNAMIC);
 
+       kfree(target_path);
+
        return new_prop;
 
 err_free_new_prop:
index c190da54f1754f77cfdee751126e57842e91312f..6327d1ffb9636589857d6c92f011c23c2854dde1 100644 (file)
@@ -3,22 +3,37 @@
 /plugin/;
 
 /*
- * &electric_1/motor-1 and &spin_ctrl_1 are the same node:
- *   /testcase-data-2/substation@100/motor-1
+ * &electric_1/motor-1/electric and &spin_ctrl_1/electric are the same node:
+ *   /testcase-data-2/substation@100/motor-1/electric
  *
  * Thus the property "rpm_avail" in each fragment will
  * result in an attempt to update the same property twice.
  * This will result in an error and the overlay apply
  * will fail.
+ *
+ * The previous version of this test did not include the extra
+ * level of node 'electric'.  That resulted in the 'rpm_avail'
+ * property being located in the pre-existing node 'motor-1'.
+ * Modifying a property results in a WARNING that a memory leak
+ * will occur if the overlay is removed.  Since the overlay apply
+ * fails, the memory leak does actually occur, and kmemleak will
+ * further report the memory leak if CONFIG_DEBUG_KMEMLEAK is
+ * enabled.  Adding the overlay node 'electric' avoids the
+ * memory leak and thus people who use kmemleak will not
+ * have to debug this non-problem again.
  */
 
 &electric_1 {
 
        motor-1 {
-               rpm_avail = < 100 >;
+               electric {
+                       rpm_avail = < 100 >;
+               };
        };
 };
 
 &spin_ctrl_1 {
-               rpm_avail = < 100 200 >;
+               electric {
+                       rpm_avail = < 100 200 >;
+               };
 };
index 7e27670c36163f0c0e82ed254806b11d843b8a9b..398de04fd19c943e40ec0adb6d5bca92302c1979 100644 (file)
@@ -861,6 +861,10 @@ static void __init of_unittest_changeset(void)
        unittest(!of_changeset_revert(&chgset), "revert failed\n");
 
        of_changeset_destroy(&chgset);
+
+       of_node_put(n1);
+       of_node_put(n2);
+       of_node_put(n21);
 #endif
 }
 
@@ -1243,10 +1247,13 @@ static void __init of_unittest_platform_populate(void)
 
        of_platform_populate(np, match, NULL, &test_bus->dev);
        for_each_child_of_node(np, child) {
-               for_each_child_of_node(child, grandchild)
-                       unittest(of_find_device_by_node(grandchild),
+               for_each_child_of_node(child, grandchild) {
+                       pdev = of_find_device_by_node(grandchild);
+                       unittest(pdev,
                                 "Could not create device for node '%pOFn'\n",
                                 grandchild);
+                       of_dev_put(pdev);
+               }
        }
 
        of_platform_depopulate(&test_bus->dev);
@@ -3087,8 +3094,11 @@ static __init void of_unittest_overlay_high_level(void)
                                goto err_unlock;
                        }
                        if (__of_add_property(of_symbols, new_prop)) {
+                               kfree(new_prop->name);
+                               kfree(new_prop->value);
+                               kfree(new_prop);
                                /* "name" auto-generated by unflatten */
-                               if (!strcmp(new_prop->name, "name"))
+                               if (!strcmp(prop->name, "name"))
                                        continue;
                                unittest(0, "duplicate property '%s' in overlay_base node __symbols__",
                                         prop->name);
@@ -3171,21 +3181,21 @@ static __init void of_unittest_overlay_high_level(void)
                   "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
 
        EXPECT_BEGIN(KERN_ERR,
-                    "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
        EXPECT_BEGIN(KERN_ERR,
-                    "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
        EXPECT_BEGIN(KERN_ERR,
-                    "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
 
        unittest(overlay_data_apply("overlay_bad_add_dup_prop", NULL),
                 "Adding overlay 'overlay_bad_add_dup_prop' failed\n");
 
        EXPECT_END(KERN_ERR,
-                  "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
        EXPECT_END(KERN_ERR,
-                  "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
        EXPECT_END(KERN_ERR,
-                  "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+                    "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
 
        unittest(overlay_data_apply("overlay_bad_phandle", NULL),
                 "Adding overlay 'overlay_bad_phandle' failed\n");
index ba43e6a3dc0aeeddab9409fd0adc6339815c841e..e4f01e7771a223d6915a5f231339b1fd5d0a3a4a 100644 (file)
@@ -819,6 +819,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
        if (unlikely(!target_freq)) {
                if (opp_table->required_opp_tables) {
                        ret = _set_required_opps(dev, opp_table, NULL);
+               } else if (!_get_opp_count(opp_table)) {
+                       return 0;
                } else {
                        dev_err(dev, "target frequency can't be 0\n");
                        ret = -EINVAL;
@@ -849,6 +851,18 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
                goto put_opp_table;
        }
 
+       /*
+        * For IO devices which require an OPP on some platforms/SoCs
+        * while just needing to scale the clock on some others
+        * we look for empty OPP tables with just a clock handle and
+        * scale only the clk. This makes dev_pm_opp_set_rate()
+        * equivalent to a clk_set_rate()
+        */
+       if (!_get_opp_count(opp_table)) {
+               ret = _generic_set_opp_clk_only(dev, clk, freq);
+               goto put_opp_table;
+       }
+
        temp_freq = old_freq;
        old_opp = _find_freq_ceil(opp_table, &temp_freq);
        if (IS_ERR(old_opp)) {
index 230e6cf3da2f71773cde303e0fe97c6e67009d93..c48e5b38a4417d27daee2efd6c42a71fd17b80f9 100644 (file)
@@ -40,7 +40,7 @@ cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub,
        int id = sample->sensor_id;
        struct iio_dev *indio_dev;
 
-       if (id > sensorhub->sensor_num)
+       if (id >= sensorhub->sensor_num)
                return -EINVAL;
 
        cb = sensorhub->push_data[id].push_data_cb;
@@ -820,7 +820,7 @@ static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
        if (fifo_info->count > sensorhub->fifo_size ||
            fifo_info->size != sensorhub->fifo_size) {
                dev_warn(sensorhub->dev,
-                        "Mismatch EC data: count %d, size %d - expected %d",
+                        "Mismatch EC data: count %d, size %d - expected %d\n",
                         fifo_info->count, fifo_info->size,
                         sensorhub->fifo_size);
                goto error;
@@ -851,14 +851,14 @@ static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
                }
                if (number_data > fifo_info->count - i) {
                        dev_warn(sensorhub->dev,
-                                "Invalid EC data: too many entry received: %d, expected %d",
+                                "Invalid EC data: too many entry received: %d, expected %d\n",
                                 number_data, fifo_info->count - i);
                        break;
                }
                if (out + number_data >
                    sensorhub->ring + fifo_info->count) {
                        dev_warn(sensorhub->dev,
-                                "Too many samples: %d (%zd data) to %d entries for expected %d entries",
+                                "Too many samples: %d (%zd data) to %d entries for expected %d entries\n",
                                 i, out - sensorhub->ring, i + number_data,
                                 fifo_info->count);
                        break;
index a8682f69effc4d39f39c9f9db297717550db07c5..376f1efbbb86046660d0bcf19a8d97a44f0311f4 100644 (file)
@@ -26,7 +26,6 @@ config DASD
        def_tristate y
        prompt "Support for DASD devices"
        depends on CCW && BLOCK
-       select IOSCHED_DEADLINE
        help
          Enable this option if you want to access DASDs directly utilizing
          S/390s channel subsystem commands. This is necessary for running
index 90a17452a50dc3beabf87ce2334e9c1f2c39a06f..13ed9073fc72fab3cdcb677924609cccc0c30056 100644 (file)
@@ -6,6 +6,7 @@ config SCSI_HISI_SAS
        select SCSI_SAS_LIBSAS
        select BLK_DEV_INTEGRITY
        depends on ATA
+       select SATA_HOST
        help
                This driver supports HiSilicon's SAS HBA, including support based
                on platform device
index f301a8048b2fe2af60ba2af85554aaa564c842be..bf1e98f11990198b4a6a41f28e6fc9f1c30817c4 100644 (file)
@@ -2539,7 +2539,6 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 {
        va_list va;
        struct va_format vaf;
-       char pbuf[64];
 
        va_start(va, fmt);
 
@@ -2547,6 +2546,8 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
        vaf.va = &va;
 
        if (!ql_mask_match(level)) {
+               char pbuf[64];
+
                if (vha != NULL) {
                        const struct pci_dev *pdev = vha->hw->pdev;
                        /* <module-name> <msg-id>:<host> Message */
index 5b2deaa730bfd6adc344d144eeea4794ce53f619..caa6b840e459463a6cfa2e61745986f224bf7e6c 100644 (file)
@@ -3611,8 +3611,6 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha)
                        ha->lr_distance = LR_DISTANCE_5K;
        }
 
-       if (!vha->flags.init_done)
-               rc = QLA_SUCCESS;
 out:
        ql_dbg(ql_dbg_async, vha, 0x507b,
            "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
index 8d7a905f624713bf576f9b31f67c91c4b7cd341b..8a78d395bbc8f92eeaa33ac8d41bdf3f10373a8a 100644 (file)
@@ -87,7 +87,6 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
        }
 
        /* terminate exchange */
-       memset(rsp_els, 0, sizeof(*rsp_els));
        rsp_els->entry_type = ELS_IOCB_TYPE;
        rsp_els->entry_count = 1;
        rsp_els->nport_handle = ~0;
index 9fd83d1bffe02d1e5d87c0e0d0a66d20f7717025..4ed90437e8c42e9f5c33200774a78e25bbcc7fc5 100644 (file)
@@ -4894,8 +4894,6 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
                return QLA_MEMORY_ALLOC_FAILED;
        }
 
-       memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE);
-
        els_cmd_map[index] |= 1 << bit;
 
        mcp->mb[0] = MBC_SET_RNID_PARAMS;
index 4e6af592f018e77b54c81624b8860de3826b594a..9c0ee192f0f9c8e9825c01b6fd91b9df96bbcd5b 100644 (file)
@@ -793,8 +793,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
                        "sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
                        (int) cmnd[0], (int) hp->cmd_len));
 
-       if (hp->dxfer_len >= SZ_256M)
+       if (hp->dxfer_len >= SZ_256M) {
+               sg_remove_request(sfp, srp);
                return -EINVAL;
+       }
 
        k = sg_start_req(srp, cmnd);
        if (k) {
index 6b4b354c88aa09d100b77bc6b1f4cf2dfb0929be..1e031d81e59e871dc96025e9963f3abe9723199d 100644 (file)
@@ -63,7 +63,7 @@ static int fc_get_pr_transport_id(
         * encoded TransportID.
         */
        ptr = &se_nacl->initiatorname[0];
-       for (i = 0; i < 24; ) {
+       for (i = 0; i < 23; ) {
                if (!strncmp(&ptr[i], ":", 1)) {
                        i++;
                        continue;
@@ -341,7 +341,8 @@ static char *iscsi_parse_pr_out_transport_id(
                        *p = tolower(*p);
                        p++;
                }
-       }
+       } else
+               *port_nexus_ptr = NULL;
 
        return &buf[4];
 }
index 0b9dfa6b17bc70c3302c90776cd220d0bf85dc81..f769bb1e373568e53a4290003876a545922bfced 100644 (file)
@@ -2073,6 +2073,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
        mb->cmd_tail = 0;
        mb->cmd_head = 0;
        tcmu_flush_dcache_range(mb, sizeof(*mb));
+       clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
 
        del_timer(&udev->cmd_timer);
 
index 91b0a719d221722a3932f828edcdd70ab9896a24..fa88e8b9a83dfb6bcdf15f7da8f932e0ef83308f 100644 (file)
@@ -472,7 +472,7 @@ config FB_OF
 
 config FB_CONTROL
        bool "Apple \"control\" display support"
-       depends on (FB = y) && PPC_PMAC && PPC32
+       depends on (FB = y) && ((PPC_PMAC && PPC32) || COMPILE_TEST)
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 314ab82e01c027503856d1e6fde3f1f94a6ae276..6f7838979f0a9463840d560512fab732bdd2cf22 100644 (file)
@@ -544,10 +544,6 @@ static int arcfb_probe(struct platform_device *dev)
        par->cslut[1] = 0x06;
        info->flags = FBINFO_FLAG_DEFAULT;
        spin_lock_init(&par->lock);
-       retval = register_framebuffer(info);
-       if (retval < 0)
-               goto err1;
-       platform_set_drvdata(dev, info);
        if (irq) {
                par->irq = irq;
                if (request_irq(par->irq, &arcfb_interrupt, IRQF_SHARED,
@@ -558,6 +554,10 @@ static int arcfb_probe(struct platform_device *dev)
                        goto err1;
                }
        }
+       retval = register_framebuffer(info);
+       if (retval < 0)
+               goto err1;
+       platform_set_drvdata(dev, info);
        fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
                videomemorysize >> 10);
 
@@ -593,6 +593,8 @@ static int arcfb_remove(struct platform_device *dev)
 
        if (info) {
                unregister_framebuffer(info);
+               if (irq)
+                       free_irq(((struct arcfb_par *)(info->par))->irq, info);
                vfree((void __force *)info->screen_base);
                framebuffer_release(info);
        }
index d567f5d56c13e316d087fa6cc99c25cc09ee98ad..1e252192569a8be75d81ec7496f72a4ecde18cd2 100644 (file)
@@ -1114,7 +1114,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 
        sinfo->irq_base = platform_get_irq(pdev, 0);
        if (sinfo->irq_base < 0) {
-               dev_err(dev, "unable to get irq\n");
                ret = sinfo->irq_base;
                goto stop_clk;
        }
index d7e41c8dd533c785fa0a55dfaf11faeb88adaebf..d05d4195acade79cef91e9e2a900beab6ab60676 100644 (file)
@@ -334,20 +334,6 @@ static const struct aty128_meminfo sdr_128 = {
        .name = "128-bit SDR SGRAM (1:1)",
 };
 
-static const struct aty128_meminfo sdr_64 = {
-       .ML = 4,
-       .MB = 8,
-       .Trcd = 3,
-       .Trp = 3,
-       .Twr = 1,
-       .CL = 3,
-       .Tr2w = 1,
-       .LoopLatency = 17,
-       .DspOn = 46,
-       .Rloop = 17,
-       .name = "64-bit SDR SGRAM (1:1)",
-};
-
 static const struct aty128_meminfo sdr_sgram = {
        .ML = 4,
        .MB = 4,
index 175d2598f28e4253b7844577199bd59f9983def3..b0ac895e5ac9af2c5da973e93dbb2e778182b145 100644 (file)
 #ifdef DEBUG
 #define DPRINTK(fmt, args...)  printk(KERN_DEBUG "atyfb: " fmt, ## args)
 #else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...)  no_printk(fmt, ##args)
 #endif
 
 #define PRINTKI(fmt, args...)  printk(KERN_INFO "atyfb: " fmt, ## args)
@@ -3819,9 +3819,9 @@ static int __init atyfb_setup(char *options)
 
        while ((this_opt = strsep(&options, ",")) != NULL) {
                if (!strncmp(this_opt, "noaccel", 7)) {
-                       noaccel = 1;
+                       noaccel = true;
                } else if (!strncmp(this_opt, "nomtrr", 6)) {
-                       nomtrr = 1;
+                       nomtrr = true;
                } else if (!strncmp(this_opt, "vram:", 5))
                        vram = simple_strtoul(this_opt + 5, NULL, 0);
                else if (!strncmp(this_opt, "pll:", 4))
index 38b61cdb5ca409c701f0188e4bb20067cb5b7024..9c4f1be856eca46aefa7eec184167b858d456633 100644 (file)
@@ -31,7 +31,6 @@
  *  more details.
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/nvram.h>
 #include <linux/adb.h>
 #include <linux/cuda.h>
+#ifdef CONFIG_PPC_PMAC
 #include <asm/prom.h>
 #include <asm/btext.h>
+#endif
 
 #include "macmodes.h"
 #include "controlfb.h"
 
+#if !defined(CONFIG_PPC_PMAC) || !defined(CONFIG_PPC32)
+#define invalid_vram_cache(addr)
+#undef in_8
+#undef out_8
+#undef in_le32
+#undef out_le32
+#define in_8(addr)             0
+#define out_8(addr, val)
+#define in_le32(addr)          0
+#define out_le32(addr, val)
+#define pgprot_cached_wthru(prot) (prot)
+#else
+static void invalid_vram_cache(void __force *addr)
+{
+       eieio();
+       dcbf(addr);
+       mb();
+       eieio();
+       dcbf(addr);
+       mb();
+}
+#endif
+
 struct fb_par_control {
        int     vmode, cmode;
        int     xres, yres;
@@ -117,38 +141,6 @@ struct fb_info_control {
 #define CNTRL_REG(INFO,REG) (&(((INFO)->control_regs->REG).r))
 
 
-/******************** Prototypes for exported functions ********************/
-/*
- * struct fb_ops
- */
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
-       struct fb_info *info);
-static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
-       u_int transp, struct fb_info *info);
-static int controlfb_blank(int blank_mode, struct fb_info *info);
-static int controlfb_mmap(struct fb_info *info,
-       struct vm_area_struct *vma);
-static int controlfb_set_par (struct fb_info *info);
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
-
-/******************** Prototypes for internal functions **********************/
-
-static void set_control_clock(unsigned char *params);
-static int init_control(struct fb_info_control *p);
-static void control_set_hardware(struct fb_info_control *p,
-       struct fb_par_control *par);
-static int control_of_init(struct device_node *dp);
-static void find_vram_size(struct fb_info_control *p);
-static int read_control_sense(struct fb_info_control *p);
-static int calc_clock_params(unsigned long clk, unsigned char *param);
-static int control_var_to_par(struct fb_var_screeninfo *var,
-       struct fb_par_control *par, const struct fb_info *fb_info);
-static inline void control_par_to_var(struct fb_par_control *par,
-       struct fb_var_screeninfo *var);
-static void control_init_info(struct fb_info *info, struct fb_info_control *p);
-static void control_cleanup(void);
-
-
 /************************** Internal variables *******************************/
 
 static struct fb_info_control *control_fb;
@@ -157,189 +149,6 @@ static int default_vmode __initdata = VMODE_NVRAM;
 static int default_cmode __initdata = CMODE_NVRAM;
 
 
-static const struct fb_ops controlfb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_check_var   = controlfb_check_var,
-       .fb_set_par     = controlfb_set_par,
-       .fb_setcolreg   = controlfb_setcolreg,
-       .fb_pan_display = controlfb_pan_display,
-       .fb_blank       = controlfb_blank,
-       .fb_mmap        = controlfb_mmap,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
-};
-
-
-/********************  The functions for controlfb_ops ********************/
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
-       struct device_node *dp;
-       int ret = -ENXIO;
-
-       dp = of_find_node_by_name(NULL, "control");
-       if (dp && !control_of_init(dp))
-               ret = 0;
-       of_node_put(dp);
-
-       return ret;
-}
-
-void cleanup_module(void)
-{
-       control_cleanup();
-}
-#endif
-
-/*
- * Checks a var structure
- */
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
-{
-       struct fb_par_control par;
-       int err;
-
-       err = control_var_to_par(var, &par, info);
-       if (err)
-               return err;     
-       control_par_to_var(&par, var);
-
-       return 0;
-}
-
-/*
- * Applies current var to display
- */
-static int controlfb_set_par (struct fb_info *info)
-{
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       struct fb_par_control par;
-       int err;
-
-       if((err = control_var_to_par(&info->var, &par, info))) {
-               printk (KERN_ERR "controlfb_set_par: error calling"
-                                " control_var_to_par: %d.\n", err);
-               return err;
-       }
-       
-       control_set_hardware(p, &par);
-
-       info->fix.visual = (p->par.cmode == CMODE_8) ?
-               FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-       info->fix.line_length = p->par.pitch;
-       info->fix.xpanstep = 32 >> p->par.cmode;
-       info->fix.ypanstep = 1;
-
-       return 0;
-}
-
-/*
- * Set screen start address according to var offset values
- */
-static inline void set_screen_start(int xoffset, int yoffset,
-       struct fb_info_control *p)
-{
-       struct fb_par_control *par = &p->par;
-
-       par->xoffset = xoffset;
-       par->yoffset = yoffset;
-       out_le32(CNTRL_REG(p,start_addr),
-                par->yoffset * par->pitch + (par->xoffset << par->cmode));
-}
-
-
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
-                                struct fb_info *info)
-{
-       unsigned int xoffset, hstep;
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       struct fb_par_control *par = &p->par;
-
-       /*
-        * make sure start addr will be 32-byte aligned
-        */
-       hstep = 0x1f >> par->cmode;
-       xoffset = (var->xoffset + hstep) & ~hstep;
-
-       if (xoffset+par->xres > par->vxres ||
-           var->yoffset+par->yres > par->vyres)
-               return -EINVAL;
-
-       set_screen_start(xoffset, var->yoffset, p);
-
-       return 0;
-}
-
-
-/*
- * Private mmap since we want to have a different caching on the framebuffer
- * for controlfb.
- * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
- */
-static int controlfb_mmap(struct fb_info *info,
-                       struct vm_area_struct *vma)
-{
-       unsigned long mmio_pgoff;
-       unsigned long start;
-       u32 len;
-
-       start = info->fix.smem_start;
-       len = info->fix.smem_len;
-       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
-       if (vma->vm_pgoff >= mmio_pgoff) {
-               if (info->var.accel_flags)
-                       return -EINVAL;
-               vma->vm_pgoff -= mmio_pgoff;
-               start = info->fix.mmio_start;
-               len = info->fix.mmio_len;
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       } else {
-               /* framebuffer */
-               vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
-       }
-
-       return vm_iomap_memory(vma, start, len);
-}
-
-static int controlfb_blank(int blank_mode, struct fb_info *info)
-{
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       unsigned ctrl;
-
-       ctrl = le32_to_cpup(CNTRL_REG(p,ctrl));
-       if (blank_mode > 0)
-               switch (blank_mode) {
-               case FB_BLANK_VSYNC_SUSPEND:
-                       ctrl &= ~3;
-                       break;
-               case FB_BLANK_HSYNC_SUSPEND:
-                       ctrl &= ~0x30;
-                       break;
-               case FB_BLANK_POWERDOWN:
-                       ctrl &= ~0x33;
-                       /* fall through */
-               case FB_BLANK_NORMAL:
-                       ctrl |= 0x400;
-                       break;
-               default:
-                       break;
-               }
-       else {
-               ctrl &= ~0x400;
-               ctrl |= 0x33;
-       }
-       out_le32(CNTRL_REG(p,ctrl), ctrl);
-
-       return 0;
-}
-
 static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                             u_int transp, struct fb_info *info)
 {
@@ -396,88 +205,31 @@ static void set_control_clock(unsigned char *params)
 #endif 
 }
 
-
 /*
- * finish off the driver initialization and register
+ * Set screen start address according to var offset values
  */
-static int __init init_control(struct fb_info_control *p)
+static inline void set_screen_start(int xoffset, int yoffset,
+       struct fb_info_control *p)
 {
-       int full, sense, vmode, cmode, vyres;
-       struct fb_var_screeninfo var;
-       int rc;
-       
-       printk(KERN_INFO "controlfb: ");
-
-       full = p->total_vram == 0x400000;
+       struct fb_par_control *par = &p->par;
 
-       /* Try to pick a video mode out of NVRAM if we have one. */
-       cmode = default_cmode;
-       if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
-               cmode = nvram_read_byte(NV_CMODE);
-       if (cmode < CMODE_8 || cmode > CMODE_32)
-               cmode = CMODE_8;
+       par->xoffset = xoffset;
+       par->yoffset = yoffset;
+       out_le32(CNTRL_REG(p,start_addr),
+                par->yoffset * par->pitch + (par->xoffset << par->cmode));
+}
 
-       vmode = default_vmode;
-       if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
-               vmode = nvram_read_byte(NV_VMODE);
-       if (vmode < 1 || vmode > VMODE_MAX ||
-           control_mac_modes[vmode - 1].m[full] < cmode) {
-               sense = read_control_sense(p);
-               printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
-               vmode = mac_map_monitor_sense(sense);
-               if (control_mac_modes[vmode - 1].m[full] < 0)
-                       vmode = VMODE_640_480_60;
-               cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
-       }
+#define RADACAL_WRITE(a,d) \
+       out_8(&p->cmap_regs->addr, (a)); \
+       out_8(&p->cmap_regs->dat,   (d))
 
-       /* Initialize info structure */
-       control_init_info(&p->info, p);
-
-       /* Setup default var */
-       if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
-               /* This shouldn't happen! */
-               printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
-try_again:
-               vmode = VMODE_640_480_60;
-               cmode = CMODE_8;
-               if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
-                       printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
-                       return -ENXIO;
-               }
-               printk(KERN_INFO "controlfb: ");
-       }
-       printk("using video mode %d and color mode %d.\n", vmode, cmode);
-
-       vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
-       if (vyres > var.yres)
-               var.yres_virtual = vyres;
-
-       /* Apply default var */
-       var.activate = FB_ACTIVATE_NOW;
-       rc = fb_set_var(&p->info, &var);
-       if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
-               goto try_again;
-
-       /* Register with fbdev layer */
-       if (register_framebuffer(&p->info) < 0)
-               return -ENXIO;
-
-       fb_info(&p->info, "control display adapter\n");
-
-       return 0;
-}
-
-#define RADACAL_WRITE(a,d) \
-       out_8(&p->cmap_regs->addr, (a)); \
-       out_8(&p->cmap_regs->dat,   (d))
-
-/* Now how about actually saying, Make it so! */
-/* Some things in here probably don't need to be done each time. */
-static void control_set_hardware(struct fb_info_control *p, struct fb_par_control *par)
-{
-       struct control_regvals  *r;
-       volatile struct preg    __iomem *rp;
-       int                     i, cmode;
+/* Now how about actually saying, Make it so! */
+/* Some things in here probably don't need to be done each time. */
+static void control_set_hardware(struct fb_info_control *p, struct fb_par_control *par)
+{
+       struct control_regvals  *r;
+       volatile struct preg    __iomem *rp;
+       int                     i, cmode;
 
        if (PAR_EQUAL(&p->par, par)) {
                /*
@@ -528,67 +280,6 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
 #endif /* CONFIG_BOOTX_TEXT */
 }
 
-
-/*
- * Parse user specified options (`video=controlfb:')
- */
-static void __init control_setup(char *options)
-{
-       char *this_opt;
-
-       if (!options || !*options)
-               return;
-
-       while ((this_opt = strsep(&options, ",")) != NULL) {
-               if (!strncmp(this_opt, "vmode:", 6)) {
-                       int vmode = simple_strtoul(this_opt+6, NULL, 0);
-                       if (vmode > 0 && vmode <= VMODE_MAX &&
-                           control_mac_modes[vmode - 1].m[1] >= 0)
-                               default_vmode = vmode;
-               } else if (!strncmp(this_opt, "cmode:", 6)) {
-                       int depth = simple_strtoul(this_opt+6, NULL, 0);
-                       switch (depth) {
-                        case CMODE_8:
-                        case CMODE_16:
-                        case CMODE_32:
-                               default_cmode = depth;
-                               break;
-                        case 8:
-                               default_cmode = CMODE_8;
-                               break;
-                        case 15:
-                        case 16:
-                               default_cmode = CMODE_16;
-                               break;
-                        case 24:
-                        case 32:
-                               default_cmode = CMODE_32;
-                               break;
-                       }
-               }
-       }
-}
-
-static int __init control_init(void)
-{
-       struct device_node *dp;
-       char *option = NULL;
-       int ret = -ENXIO;
-
-       if (fb_get_options("controlfb", &option))
-               return -ENODEV;
-       control_setup(option);
-
-       dp = of_find_node_by_name(NULL, "control");
-       if (dp && !control_of_init(dp))
-               ret = 0;
-       of_node_put(dp);
-
-       return ret;
-}
-
-module_init(control_init);
-
 /* Work out which banks of VRAM we have installed. */
 /* danj: I guess the card just ignores writes to nonexistant VRAM... */
 
@@ -605,12 +296,7 @@ static void __init find_vram_size(struct fb_info_control *p)
 
        out_8(&p->frame_buffer[0x600000], 0xb3);
        out_8(&p->frame_buffer[0x600001], 0x71);
-       asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0x600000])
-                                       : "memory" );
-       mb();
-       asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0x600000])
-                                       : "memory" );
-       mb();
+       invalid_vram_cache(&p->frame_buffer[0x600000]);
 
        bank2 = (in_8(&p->frame_buffer[0x600000]) == 0xb3)
                && (in_8(&p->frame_buffer[0x600001]) == 0x71);
@@ -624,12 +310,7 @@ static void __init find_vram_size(struct fb_info_control *p)
 
        out_8(&p->frame_buffer[0], 0x5a);
        out_8(&p->frame_buffer[1], 0xc7);
-       asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0])
-                                       : "memory" );
-       mb();
-       asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0])
-                                       : "memory" );
-       mb();
+       invalid_vram_cache(&p->frame_buffer[0]);
 
        bank1 = (in_8(&p->frame_buffer[0]) == 0x5a)
                && (in_8(&p->frame_buffer[1]) == 0xc7);
@@ -663,78 +344,6 @@ static void __init find_vram_size(struct fb_info_control *p)
                        (bank1 + bank2) << 1, bank1 << 1, bank2 << 1);
 }
 
-
-/*
- * find "control" and initialize
- */
-static int __init control_of_init(struct device_node *dp)
-{
-       struct fb_info_control  *p;
-       struct resource         fb_res, reg_res;
-
-       if (control_fb) {
-               printk(KERN_ERR "controlfb: only one control is supported\n");
-               return -ENXIO;
-       }
-
-       if (of_pci_address_to_resource(dp, 2, &fb_res) ||
-           of_pci_address_to_resource(dp, 1, &reg_res)) {
-               printk(KERN_ERR "can't get 2 addresses for control\n");
-               return -ENXIO;
-       }
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-       control_fb = p; /* save it for cleanups */
-
-       /* Map in frame buffer and registers */
-       p->fb_orig_base = fb_res.start;
-       p->fb_orig_size = resource_size(&fb_res);
-       /* use the big-endian aperture (??) */
-       p->frame_buffer_phys = fb_res.start + 0x800000;
-       p->control_regs_phys = reg_res.start;
-       p->control_regs_size = resource_size(&reg_res);
-
-       if (!p->fb_orig_base ||
-           !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
-               p->fb_orig_base = 0;
-               goto error_out;
-       }
-       /* map at most 8MB for the frame buffer */
-       p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
-
-       if (!p->control_regs_phys ||
-           !request_mem_region(p->control_regs_phys, p->control_regs_size,
-           "controlfb regs")) {
-               p->control_regs_phys = 0;
-               goto error_out;
-       }
-       p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
-
-       p->cmap_regs_phys = 0xf301b000;  /* XXX not in prom? */
-       if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
-               p->cmap_regs_phys = 0;
-               goto error_out;
-       }
-       p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
-
-       if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
-               goto error_out;
-
-       find_vram_size(p);
-       if (!p->total_vram)
-               goto error_out;
-
-       if (init_control(p) < 0)
-               goto error_out;
-
-       return 0;
-
-error_out:
-       control_cleanup();
-       return -ENXIO;
-}
-
 /*
  * Get the monitor sense value.
  * Note that this can be called before calibrate_delay,
@@ -1019,6 +628,150 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
        var->pixclock >>= par->regvals.clock_params[2];
 }
 
+/********************  The functions for controlfb_ops ********************/
+
+/*
+ * Checks a var structure
+ */
+static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct fb_par_control par;
+       int err;
+
+       err = control_var_to_par(var, &par, info);
+       if (err)
+               return err;     
+       control_par_to_var(&par, var);
+
+       return 0;
+}
+
+/*
+ * Applies current var to display
+ */
+static int controlfb_set_par (struct fb_info *info)
+{
+       struct fb_info_control *p =
+               container_of(info, struct fb_info_control, info);
+       struct fb_par_control par;
+       int err;
+
+       if((err = control_var_to_par(&info->var, &par, info))) {
+               printk (KERN_ERR "controlfb_set_par: error calling"
+                                " control_var_to_par: %d.\n", err);
+               return err;
+       }
+       
+       control_set_hardware(p, &par);
+
+       info->fix.visual = (p->par.cmode == CMODE_8) ?
+               FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
+       info->fix.line_length = p->par.pitch;
+       info->fix.xpanstep = 32 >> p->par.cmode;
+       info->fix.ypanstep = 1;
+
+       return 0;
+}
+
+static int controlfb_pan_display(struct fb_var_screeninfo *var,
+                                struct fb_info *info)
+{
+       unsigned int xoffset, hstep;
+       struct fb_info_control *p =
+               container_of(info, struct fb_info_control, info);
+       struct fb_par_control *par = &p->par;
+
+       /*
+        * make sure start addr will be 32-byte aligned
+        */
+       hstep = 0x1f >> par->cmode;
+       xoffset = (var->xoffset + hstep) & ~hstep;
+
+       if (xoffset+par->xres > par->vxres ||
+           var->yoffset+par->yres > par->vyres)
+               return -EINVAL;
+
+       set_screen_start(xoffset, var->yoffset, p);
+
+       return 0;
+}
+
+static int controlfb_blank(int blank_mode, struct fb_info *info)
+{
+       struct fb_info_control __maybe_unused *p =
+               container_of(info, struct fb_info_control, info);
+       unsigned ctrl;
+
+       ctrl = in_le32(CNTRL_REG(p, ctrl));
+       if (blank_mode > 0)
+               switch (blank_mode) {
+               case FB_BLANK_VSYNC_SUSPEND:
+                       ctrl &= ~3;
+                       break;
+               case FB_BLANK_HSYNC_SUSPEND:
+                       ctrl &= ~0x30;
+                       break;
+               case FB_BLANK_POWERDOWN:
+                       ctrl &= ~0x33;
+                       /* fall through */
+               case FB_BLANK_NORMAL:
+                       ctrl |= 0x400;
+                       break;
+               default:
+                       break;
+               }
+       else {
+               ctrl &= ~0x400;
+               ctrl |= 0x33;
+       }
+       out_le32(CNTRL_REG(p,ctrl), ctrl);
+
+       return 0;
+}
+
+/*
+ * Private mmap since we want to have a different caching on the framebuffer
+ * for controlfb.
+ * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
+ */
+static int controlfb_mmap(struct fb_info *info,
+                       struct vm_area_struct *vma)
+{
+       unsigned long mmio_pgoff;
+       unsigned long start;
+       u32 len;
+
+       start = info->fix.smem_start;
+       len = info->fix.smem_len;
+       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+       if (vma->vm_pgoff >= mmio_pgoff) {
+               if (info->var.accel_flags)
+                       return -EINVAL;
+               vma->vm_pgoff -= mmio_pgoff;
+               start = info->fix.mmio_start;
+               len = info->fix.mmio_len;
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       } else {
+               /* framebuffer */
+               vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
+       }
+
+       return vm_iomap_memory(vma, start, len);
+}
+
+static const struct fb_ops controlfb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_check_var   = controlfb_check_var,
+       .fb_set_par     = controlfb_set_par,
+       .fb_setcolreg   = controlfb_setcolreg,
+       .fb_pan_display = controlfb_pan_display,
+       .fb_blank       = controlfb_blank,
+       .fb_mmap        = controlfb_mmap,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+};
+
 /*
  * Set misc info vars for this driver
  */
@@ -1045,6 +798,115 @@ static void __init control_init_info(struct fb_info *info, struct fb_info_contro
         info->fix.accel = FB_ACCEL_NONE;
 }
 
+/*
+ * Parse user specified options (`video=controlfb:')
+ */
+static void __init control_setup(char *options)
+{
+       char *this_opt;
+
+       if (!options || !*options)
+               return;
+
+       while ((this_opt = strsep(&options, ",")) != NULL) {
+               if (!strncmp(this_opt, "vmode:", 6)) {
+                       int vmode = simple_strtoul(this_opt+6, NULL, 0);
+                       if (vmode > 0 && vmode <= VMODE_MAX &&
+                           control_mac_modes[vmode - 1].m[1] >= 0)
+                               default_vmode = vmode;
+               } else if (!strncmp(this_opt, "cmode:", 6)) {
+                       int depth = simple_strtoul(this_opt+6, NULL, 0);
+                       switch (depth) {
+                        case CMODE_8:
+                        case CMODE_16:
+                        case CMODE_32:
+                               default_cmode = depth;
+                               break;
+                        case 8:
+                               default_cmode = CMODE_8;
+                               break;
+                        case 15:
+                        case 16:
+                               default_cmode = CMODE_16;
+                               break;
+                        case 24:
+                        case 32:
+                               default_cmode = CMODE_32;
+                               break;
+                       }
+               }
+       }
+}
+
+/*
+ * finish off the driver initialization and register
+ */
+static int __init init_control(struct fb_info_control *p)
+{
+       int full, sense, vmode, cmode, vyres;
+       struct fb_var_screeninfo var;
+       int rc;
+       
+       printk(KERN_INFO "controlfb: ");
+
+       full = p->total_vram == 0x400000;
+
+       /* Try to pick a video mode out of NVRAM if we have one. */
+       cmode = default_cmode;
+       if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
+               cmode = nvram_read_byte(NV_CMODE);
+       if (cmode < CMODE_8 || cmode > CMODE_32)
+               cmode = CMODE_8;
+
+       vmode = default_vmode;
+       if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
+               vmode = nvram_read_byte(NV_VMODE);
+       if (vmode < 1 || vmode > VMODE_MAX ||
+           control_mac_modes[vmode - 1].m[full] < cmode) {
+               sense = read_control_sense(p);
+               printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+               vmode = mac_map_monitor_sense(sense);
+               if (control_mac_modes[vmode - 1].m[full] < 0)
+                       vmode = VMODE_640_480_60;
+               cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
+       }
+
+       /* Initialize info structure */
+       control_init_info(&p->info, p);
+
+       /* Setup default var */
+       if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+               /* This shouldn't happen! */
+               printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
+try_again:
+               vmode = VMODE_640_480_60;
+               cmode = CMODE_8;
+               if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+                       printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
+                       return -ENXIO;
+               }
+               printk(KERN_INFO "controlfb: ");
+       }
+       printk("using video mode %d and color mode %d.\n", vmode, cmode);
+
+       vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
+       if (vyres > var.yres)
+               var.yres_virtual = vyres;
+
+       /* Apply default var */
+       var.activate = FB_ACTIVATE_NOW;
+       rc = fb_set_var(&p->info, &var);
+       if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
+               goto try_again;
+
+       /* Register with fbdev layer */
+       if (register_framebuffer(&p->info) < 0)
+               return -ENXIO;
+
+       fb_info(&p->info, "control display adapter\n");
+
+       return 0;
+}
 
 static void control_cleanup(void)
 {
@@ -1071,4 +933,93 @@ static void control_cleanup(void)
        kfree(p);
 }
 
+/*
+ * find "control" and initialize
+ */
+static int __init control_of_init(struct device_node *dp)
+{
+       struct fb_info_control  *p;
+       struct resource         fb_res, reg_res;
+
+       if (control_fb) {
+               printk(KERN_ERR "controlfb: only one control is supported\n");
+               return -ENXIO;
+       }
+
+       if (of_pci_address_to_resource(dp, 2, &fb_res) ||
+           of_pci_address_to_resource(dp, 1, &reg_res)) {
+               printk(KERN_ERR "can't get 2 addresses for control\n");
+               return -ENXIO;
+       }
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+       control_fb = p; /* save it for cleanups */
+
+       /* Map in frame buffer and registers */
+       p->fb_orig_base = fb_res.start;
+       p->fb_orig_size = resource_size(&fb_res);
+       /* use the big-endian aperture (??) */
+       p->frame_buffer_phys = fb_res.start + 0x800000;
+       p->control_regs_phys = reg_res.start;
+       p->control_regs_size = resource_size(&reg_res);
+
+       if (!p->fb_orig_base ||
+           !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
+               p->fb_orig_base = 0;
+               goto error_out;
+       }
+       /* map at most 8MB for the frame buffer */
+       p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
+
+       if (!p->control_regs_phys ||
+           !request_mem_region(p->control_regs_phys, p->control_regs_size,
+           "controlfb regs")) {
+               p->control_regs_phys = 0;
+               goto error_out;
+       }
+       p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
+
+       p->cmap_regs_phys = 0xf301b000;  /* XXX not in prom? */
+       if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
+               p->cmap_regs_phys = 0;
+               goto error_out;
+       }
+       p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
+
+       if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
+               goto error_out;
+
+       find_vram_size(p);
+       if (!p->total_vram)
+               goto error_out;
+
+       if (init_control(p) < 0)
+               goto error_out;
+
+       return 0;
+
+error_out:
+       control_cleanup();
+       return -ENXIO;
+}
+
+static int __init control_init(void)
+{
+       struct device_node *dp;
+       char *option = NULL;
+       int ret = -ENXIO;
+
+       if (fb_get_options("controlfb", &option))
+               return -ENODEV;
+       control_setup(option);
+
+       dp = of_find_node_by_name(NULL, "control");
+       if (dp && !control_of_init(dp))
+               ret = 0;
+       of_node_put(dp);
+
+       return ret;
+}
 
+device_initcall(control_init);
index 8e2e19f3bf44196d09cd8b6c33d60e2ad12ef013..d62a1e43864e60367ed07b67d6ab6fcfac0240b1 100644 (file)
@@ -44,7 +44,7 @@
 #ifdef DEBUG
 #define DPRINTK(fmt, args...) printk(fmt,## args)
 #else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) no_printk(fmt, ##args)
 #endif
 
 #define FBMON_FIX_HEADER  1
index 460826a7ad551d7efdbf9a1ba8a44c6698e6d63b..513f58f28b0fddcfdd44d6c3f7621464cc552786 100644 (file)
@@ -1160,12 +1160,14 @@ EXPORT_SYMBOL(cyber2000fb_detach);
 #define DDC_SDA_IN     (1 << 6)
 
 static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
+       __acquires(&cfb->reg_b0_lock)
 {
        spin_lock(&cfb->reg_b0_lock);
        cyber2000fb_writew(0x1bf, 0x3ce, cfb);
 }
 
 static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
+       __releases(&cfb->reg_b0_lock)
 {
        cyber2000fb_writew(0x0bf, 0x3ce, cfb);
        spin_unlock(&cfb->reg_b0_lock);
index aa7583d963ac99153f9848c463e7f6591bbb65c2..13bbf7fe13bf30ecbf388eb06a6de32744e4784f 100644 (file)
@@ -1966,13 +1966,13 @@ static int i810fb_setup(char *options)
        
        while ((this_opt = strsep(&options, ",")) != NULL) {
                if (!strncmp(this_opt, "mtrr", 4))
-                       mtrr = 1;
+                       mtrr = true;
                else if (!strncmp(this_opt, "accel", 5))
-                       accel = 1;
+                       accel = true;
                else if (!strncmp(this_opt, "extvga", 6))
-                       extvga = 1;
+                       extvga = true;
                else if (!strncmp(this_opt, "sync", 4))
-                       sync = 1;
+                       sync = true;
                else if (!strncmp(this_opt, "vram:", 5))
                        vram = (simple_strtoul(this_opt+5, NULL, 0));
                else if (!strncmp(this_opt, "voffset:", 8))
@@ -1998,7 +1998,7 @@ static int i810fb_setup(char *options)
                else if (!strncmp(this_opt, "vsync2:", 7))
                        vsync2 = simple_strtoul(this_opt+7, NULL, 0);
                else if (!strncmp(this_opt, "dcolor", 6))
-                       dcolor = 1;
+                       dcolor = true;
                else if (!strncmp(this_opt, "ddc3", 4))
                        ddc3 = true;
                else
index 370bf2553d43320ebb7036383a250b5f919cc43f..884b16efa7e8a93eae6d3b628e11d46142373d30 100644 (file)
@@ -172,6 +172,7 @@ struct imxfb_info {
        int                     num_modes;
 
        struct regulator        *lcd_pwr;
+       int                     lcd_pwr_enabled;
 };
 
 static const struct platform_device_id imxfb_devtype[] = {
@@ -801,16 +802,30 @@ static int imxfb_lcd_get_power(struct lcd_device *lcddev)
        return FB_BLANK_UNBLANK;
 }
 
+static int imxfb_regulator_set(struct imxfb_info *fbi, int enable)
+{
+       int ret;
+
+       if (enable == fbi->lcd_pwr_enabled)
+               return 0;
+
+       if (enable)
+               ret = regulator_enable(fbi->lcd_pwr);
+       else
+               ret = regulator_disable(fbi->lcd_pwr);
+
+       if (ret == 0)
+               fbi->lcd_pwr_enabled = enable;
+
+       return ret;
+}
+
 static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
 {
        struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
 
-       if (!IS_ERR(fbi->lcd_pwr)) {
-               if (power == FB_BLANK_UNBLANK)
-                       return regulator_enable(fbi->lcd_pwr);
-               else
-                       return regulator_disable(fbi->lcd_pwr);
-       }
+       if (!IS_ERR(fbi->lcd_pwr))
+               return imxfb_regulator_set(fbi, power == FB_BLANK_UNBLANK);
 
        return 0;
 }
index c15f8a57498ed15e9538c18f78c5ced10c42144e..ff8e321a22cefc0b48c56fa73a25cab3219ddf7a 100644 (file)
@@ -333,11 +333,9 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                         unsigned int *deltaarray)
 {
        unsigned int mnpcount;
-       unsigned int pixel_vco;
        const struct matrox_pll_limits* pi;
        struct matrox_pll_cache* ci;
 
-       pixel_vco = 0;
        switch (pll) {
                case M_PIXEL_PLL_A:
                case M_PIXEL_PLL_B:
@@ -420,7 +418,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                                
                                mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16;
                                mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8;
-                               pixel_vco = g450_mnp2vco(minfo, mnp);
                                matroxfb_DAC_unlock_irqrestore(flags);
                        }
                        pi = &minfo->limits.video;
@@ -441,25 +438,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                        unsigned int delta;
 
                        vco = g450_mnp2vco(minfo, mnp);
-#if 0                  
-                       if (pll == M_VIDEO_PLL) {
-                               unsigned int big, small;
-
-                               if (vco < pixel_vco) {
-                                       small = vco;
-                                       big = pixel_vco;
-                               } else {
-                                       small = pixel_vco;
-                                       big = vco;
-                               }
-                               while (big > small) {
-                                       big >>= 1;
-                               }
-                               if (big == small) {
-                                       continue;
-                               }
-                       }
-#endif                 
                        delta = pll_freq_delta(fout, g450_vco2f(mnp, vco));
                        for (idx = mnpcount; idx > 0; idx--) {
                                /* == is important; due to nextpll algorithm we get
index f85ad25659e56e6e27695c0f1a949812d37fbb9b..759dee996af1b0cc3ad4a94b6b174bde5c33c68a 100644 (file)
@@ -86,7 +86,7 @@
 #ifdef DEBUG
 #define dprintk(X...)  printk(X)
 #else
-#define dprintk(X...)
+#define dprintk(X...)  no_printk(X)
 #endif
 
 #ifndef PCI_SS_VENDOR_ID_SIEMENS_NIXDORF
index 42569264801fcc6d9798c3c45593348a42154287..d40b806461ca5c6f063731f2b9d54fffde120f6e 100644 (file)
@@ -184,7 +184,6 @@ static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
 static void mb86290fb_imageblit(struct fb_info *info,
                                const struct fb_image *image)
 {
-       int mdr;
        u32 *cmd = NULL;
        void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
                       const struct fb_image *, struct fb_info *) = NULL;
@@ -196,7 +195,6 @@ static void mb86290fb_imageblit(struct fb_info *info,
        u16 dx = image->dx, dy = image->dy;
        int x2, y2, vxres, vyres;
 
-       mdr = (GDC_ROP_COPY << 9);
        x2 = image->dx + image->width;
        y2 = image->dy + image->height;
        vxres = info->var.xres_virtual;
index 4af28e4421e5dba8da6c0727d29c3b787d4e5309..603731a5a72ed6d2ae216d7424b543701cb3980c 100644 (file)
@@ -509,7 +509,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                          uint16_t h_start_width, uint16_t h_sync_width,
                          uint16_t h_end_width, uint16_t v_start_width,
                          uint16_t v_sync_width, uint16_t v_end_width,
-                         struct ipu_di_signal_cfg sig)
+                         const struct ipu_di_signal_cfg *sig)
 {
        unsigned long lock_flags;
        uint32_t reg;
@@ -591,17 +591,17 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 
        /* DI settings */
        old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
-       old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
-               sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
-               sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
+       old_conf |= sig->datamask_en << DI_D3_DATAMSK_SHIFT |
+               sig->clksel_en << DI_D3_CLK_SEL_SHIFT |
+               sig->clkidle_en << DI_D3_CLK_IDLE_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
 
        old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
-       old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
-               sig.clk_pol << DI_D3_CLK_POL_SHIFT |
-               sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
-               sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
-               sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
+       old_conf |= sig->data_pol << DI_D3_DATA_POL_SHIFT |
+               sig->clk_pol << DI_D3_CLK_POL_SHIFT |
+               sig->enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
+               sig->Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
+               sig->Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
        map = &di_mappings[mx3fb->disp_data_fmt];
@@ -855,7 +855,7 @@ static int __set_par(struct fb_info *fbi, bool lock)
                                   fbi->var.upper_margin,
                                   fbi->var.vsync_len,
                                   fbi->var.lower_margin +
-                                  fbi->var.vsync_len, sig_cfg) != 0) {
+                                  fbi->var.vsync_len, &sig_cfg) != 0) {
                        dev_err(fbi->device,
                                "mx3fb: Error initializing panel.\n");
                        return -EINVAL;
index e8a304f84ea848783a5a2628af7bee0d660d0e84..1a9d6242916e935f841a77d84d2e165e6222842f 100644 (file)
@@ -1247,7 +1247,7 @@ static ssize_t omapfb_show_caps_num(struct device *dev,
        size = 0;
        while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
                omapfb_get_caps(fbdev, plane, &caps);
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                        "plane#%d %#010x %#010x %#010x\n",
                        plane, caps.ctrl, caps.plane_color, caps.wnd_color);
                plane++;
@@ -1268,28 +1268,28 @@ static ssize_t omapfb_show_caps_text(struct device *dev,
        size = 0;
        while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
                omapfb_get_caps(fbdev, plane, &caps);
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 "plane#%d:\n", plane);
                for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (ctrl_caps[i].flag & caps.ctrl)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        " %s\n", ctrl_caps[i].name);
                }
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 " plane colors:\n");
                for (i = 0; i < ARRAY_SIZE(color_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (color_caps[i].flag & caps.plane_color)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        "  %s\n", color_caps[i].name);
                }
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 " window colors:\n");
                for (i = 0; i < ARRAY_SIZE(color_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (color_caps[i].flag & caps.wnd_color)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        "  %s\n", color_caps[i].name);
                }
 
index ce37da85cc4587d980664774f91a4e260120d29c..4a16798b2ecd83efd390303813b92b22b4224d77 100644 (file)
@@ -557,11 +557,6 @@ u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq);
 
-u32 dispc_wb_get_framedone_irq(void)
-{
-       return DISPC_IRQ_FRAMEDONEWB;
-}
-
 bool dispc_mgr_go_busy(enum omap_channel channel)
 {
        return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -579,30 +574,6 @@ void dispc_mgr_go(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_go);
 
-bool dispc_wb_go_busy(void)
-{
-       return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
-}
-
-void dispc_wb_go(void)
-{
-       enum omap_plane plane = OMAP_DSS_WB;
-       bool enable, go;
-
-       enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
-
-       if (!enable)
-               return;
-
-       go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
-       if (go) {
-               DSSERR("GO bit not down for WB\n");
-               return;
-       }
-
-       REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
-}
-
 static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
 {
        dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
@@ -1028,13 +999,6 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
        }
 }
 
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
-{
-       enum omap_plane plane = OMAP_DSS_WB;
-
-       REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
-}
-
 static void dispc_ovl_set_burst_size(enum omap_plane plane,
                enum omap_burst_size burst_size)
 {
@@ -2805,74 +2769,6 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
 }
 EXPORT_SYMBOL(dispc_ovl_setup);
 
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
-               bool mem_to_mem, const struct omap_video_timings *mgr_timings)
-{
-       int r;
-       u32 l;
-       enum omap_plane plane = OMAP_DSS_WB;
-       const int pos_x = 0, pos_y = 0;
-       const u8 zorder = 0, global_alpha = 0;
-       const bool replication = false;
-       bool truncation;
-       int in_width = mgr_timings->x_res;
-       int in_height = mgr_timings->y_res;
-       enum omap_overlay_caps caps =
-               OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
-
-       DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
-               "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width,
-               in_height, wi->width, wi->height, wi->color_mode, wi->rotation,
-               wi->mirror);
-
-       r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
-               wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
-               wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
-               wi->pre_mult_alpha, global_alpha, wi->rotation_type,
-               replication, mgr_timings, mem_to_mem);
-
-       switch (wi->color_mode) {
-       case OMAP_DSS_COLOR_RGB16:
-       case OMAP_DSS_COLOR_RGB24P:
-       case OMAP_DSS_COLOR_ARGB16:
-       case OMAP_DSS_COLOR_RGBA16:
-       case OMAP_DSS_COLOR_RGB12U:
-       case OMAP_DSS_COLOR_ARGB16_1555:
-       case OMAP_DSS_COLOR_XRGB16_1555:
-       case OMAP_DSS_COLOR_RGBX16:
-               truncation = true;
-               break;
-       default:
-               truncation = false;
-               break;
-       }
-
-       /* setup extra DISPC_WB_ATTRIBUTES */
-       l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
-       l = FLD_MOD(l, truncation, 10, 10);     /* TRUNCATIONENABLE */
-       l = FLD_MOD(l, mem_to_mem, 19, 19);     /* WRITEBACKMODE */
-       if (mem_to_mem)
-               l = FLD_MOD(l, 1, 26, 24);      /* CAPTUREMODE */
-       else
-               l = FLD_MOD(l, 0, 26, 24);      /* CAPTUREMODE */
-       dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
-
-       if (mem_to_mem) {
-               /* WBDELAYCOUNT */
-               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
-       } else {
-               int wbdelay;
-
-               wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
-                       mgr_timings->vbp, 255);
-
-               /* WBDELAYCOUNT */
-               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
-       }
-
-       return r;
-}
-
 int dispc_ovl_enable(enum omap_plane plane, bool enable)
 {
        DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2903,16 +2799,6 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_is_enabled);
 
-void dispc_wb_enable(bool enable)
-{
-       dispc_ovl_enable(OMAP_DSS_WB, enable);
-}
-
-bool dispc_wb_is_enabled(void)
-{
-       return dispc_ovl_enabled(OMAP_DSS_WB);
-}
-
 static void dispc_lcd_enable_signal_polarity(bool act_high)
 {
        if (!dss_has_feature(FEAT_LCDENABLEPOL))
index a2269008590ff96de2133396a6a724ee6698529f..21cfcbf74a6d9d10fec2fb970570e73ffdbe7b91 100644 (file)
@@ -89,17 +89,6 @@ enum dss_dsi_content_type {
        DSS_DSI_CONTENT_GENERIC,
 };
 
-enum dss_writeback_channel {
-       DSS_WB_LCD1_MGR =       0,
-       DSS_WB_LCD2_MGR =       1,
-       DSS_WB_TV_MGR =         2,
-       DSS_WB_OVL0 =           3,
-       DSS_WB_OVL1 =           4,
-       DSS_WB_OVL2 =           5,
-       DSS_WB_OVL3 =           6,
-       DSS_WB_LCD3_MGR =       7,
-};
-
 enum dss_pll_id {
        DSS_PLL_DSI1,
        DSS_PLL_DSI2,
@@ -403,15 +392,6 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
                struct dispc_clock_info *cinfo);
 void dispc_set_tv_pclk(unsigned long pclk);
 
-u32 dispc_wb_get_framedone_irq(void);
-bool dispc_wb_go_busy(void);
-void dispc_wb_go(void);
-void dispc_wb_enable(bool enable);
-bool dispc_wb_is_enabled(void);
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
-               bool mem_to_mem, const struct omap_video_timings *timings);
-
 u32 dispc_read_irqstatus(void);
 void dispc_clear_irqstatus(u32 mask);
 u32 dispc_read_irqenable(void);
index f81e2a46366dda0aaccae8bc831fafcc43e3efd4..d5404d56c922fdaed92f889d7c910261fe80a920 100644 (file)
@@ -209,49 +209,6 @@ static const struct venc_config venc_config_ntsc_trm = {
        .gen_ctrl                               = 0x00F90000,
 };
 
-static const struct venc_config venc_config_pal_bdghi = {
-       .f_control                              = 0,
-       .vidout_ctrl                            = 0,
-       .sync_ctrl                              = 0,
-       .hfltr_ctrl                             = 0,
-       .x_color                                = 0,
-       .line21                                 = 0,
-       .ln_sel                                 = 21,
-       .htrigger_vtrigger                      = 0,
-       .tvdetgp_int_start_stop_x               = 0x00140001,
-       .tvdetgp_int_start_stop_y               = 0x00010001,
-       .gen_ctrl                               = 0x00FB0000,
-
-       .llen                                   = 864-1,
-       .flens                                  = 625-1,
-       .cc_carr_wss_carr                       = 0x2F7625ED,
-       .c_phase                                = 0xDF,
-       .gain_u                                 = 0x111,
-       .gain_v                                 = 0x181,
-       .gain_y                                 = 0x140,
-       .black_level                            = 0x3e,
-       .blank_level                            = 0x3e,
-       .m_control                              = 0<<2 | 1<<1,
-       .bstamp_wss_data                        = 0x42,
-       .s_carr                                 = 0x2a098acb,
-       .l21__wc_ctl                            = 0<<13 | 0x16<<8 | 0<<0,
-       .savid__eavid                           = 0x06A70108,
-       .flen__fal                              = 23<<16 | 624<<0,
-       .lal__phase_reset                       = 2<<17 | 310<<0,
-       .hs_int_start_stop_x                    = 0x00920358,
-       .hs_ext_start_stop_x                    = 0x000F035F,
-       .vs_int_start_x                         = 0x1a7<<16,
-       .vs_int_stop_x__vs_int_start_y          = 0x000601A7,
-       .vs_int_stop_y__vs_ext_start_x          = 0x01AF0036,
-       .vs_ext_stop_x__vs_ext_start_y          = 0x27101af,
-       .vs_ext_stop_y                          = 0x05,
-       .avid_start_stop_x                      = 0x03530082,
-       .avid_start_stop_y                      = 0x0270002E,
-       .fid_int_start_x__fid_int_start_y       = 0x0005008A,
-       .fid_int_offset_y__fid_ext_start_x      = 0x002E0138,
-       .fid_ext_start_y__fid_ext_offset_y      = 0x01380005,
-};
-
 const struct omap_video_timings omap_dss_pal_timings = {
        .x_res          = 720,
        .y_res          = 574,
index 4a5db170ef59d82de4721d3fb04b7cd5cabb8424..2d39dbfa742e77abd11e50ae8bb0f05d026f027b 100644 (file)
@@ -147,11 +147,11 @@ static ssize_t show_overlays(struct device *dev,
                        if (ovl == fbdev->overlays[ovlnum])
                                break;
 
-               l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+               l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
                                t == 0 ? "" : ",", ovlnum);
        }
 
-       l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+       l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
 
        omapfb_unlock(fbdev);
        unlock_fb_info(fbi);
@@ -328,11 +328,11 @@ static ssize_t show_overlays_rotate(struct device *dev,
        lock_fb_info(fbi);
 
        for (t = 0; t < ofbi->num_overlays; t++) {
-               l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+               l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
                                t == 0 ? "" : ",", ofbi->rotation[t]);
        }
 
-       l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+       l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
 
        unlock_fb_info(fbi);
 
index fe2cadeb1b66429159529ef4d77ab7272c3934ea..c7c98d8e235928f538e04f805b7fe0faa3daf358 100644 (file)
@@ -54,7 +54,7 @@
 #define DPRINTK(a, b...)       \
        printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
 #else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...)       no_printk(a, ##b)
 #endif
 
 #define PM2_PIXMAP_SIZE        (1600 * 4)
index 2f5e23c8f8ec2bf0d1473ce8384932b9d3775764..7497bd36334c015f07c4b78de6eba81943d43944 100644 (file)
@@ -44,7 +44,7 @@
 #define DPRINTK(a, b...)       \
        printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
 #else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...)       no_printk(a, ##b)
 #endif
 
 #define PM3_PIXMAP_SIZE        (2048 * 4)
@@ -306,7 +306,7 @@ static void pm3fb_init_engine(struct fb_info *info)
                                           PM3PixelSize_GLOBAL_32BIT);
                        break;
                default:
-                       DPRINTK(1, "Unsupported depth %d\n",
+                       DPRINTK("Unsupported depth %d\n",
                                info->var.bits_per_pixel);
                        break;
                }
@@ -349,8 +349,8 @@ static void pm3fb_init_engine(struct fb_info *info)
                                           (1 << 10) | (0 << 3));
                        break;
                default:
-                       DPRINTK(1, "Unsupported depth %d\n",
-                               info->current_par->depth);
+                       DPRINTK("Unsupported depth %d\n",
+                               info->var.bits_per_pixel);
                        break;
                }
        }
index aef8a3042590dd367f78b3081819853b7e63f301..eedfbd3572a8faa41db5a80c2704e7fba5174ff1 100644 (file)
@@ -557,12 +557,11 @@ static const struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int pxa168fb_init_mode(struct fb_info *info,
+static void pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
        struct fb_var_screeninfo *var = &info->var;
-       int ret = 0;
        u32 total_w, total_h, refresh;
        u64 div_result;
        const struct fb_videomode *m;
@@ -593,8 +592,6 @@ static int pxa168fb_init_mode(struct fb_info *info,
        div_result = 1000000000000ll;
        do_div(div_result, total_w * total_h * refresh);
        var->pixclock = (u32)div_result;
-
-       return ret;
 }
 
 static int pxa168fb_probe(struct platform_device *pdev)
index 0601c13f21050970f7112b6f9be52034827a343d..08c9ee46978ef6d7c2cae8c0cd5e65d66d82712d 100644 (file)
@@ -1343,24 +1343,6 @@ int CalcStateExt
 /*
  * Load fixed function state and pre-calculated/stored state.
  */
-#if 0
-#define LOAD_FIXED_STATE(tbl,dev)                                       \
-    for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
-        chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
-#define LOAD_FIXED_STATE_8BPP(tbl,dev)                                  \
-    for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++)            \
-        chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
-#define LOAD_FIXED_STATE_15BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
-#define LOAD_FIXED_STATE_16BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1]
-#define LOAD_FIXED_STATE_32BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1]
-#endif
-
 #define LOAD_FIXED_STATE(tbl,dev)                                       \
     for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
         NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1])
index eaea8c37375348a666e6bd60cc9b149cfe88af6e..4541afcf9386ec25d640772de014c02d3f4d2869 100644 (file)
@@ -721,9 +721,7 @@ static void s1d13xxxfb_fetch_hw_state(struct fb_info *info)
                xres, yres, xres_virtual, yres_virtual, is_color, is_dual, is_tft);
 }
 
-
-static int
-s1d13xxxfb_remove(struct platform_device *pdev)
+static void __s1d13xxxfb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
        struct s1d13xxxfb_par *par = NULL;
@@ -749,6 +747,14 @@ s1d13xxxfb_remove(struct platform_device *pdev)
                           resource_size(&pdev->resource[0]));
        release_mem_region(pdev->resource[1].start,
                           resource_size(&pdev->resource[1]));
+}
+
+static int s1d13xxxfb_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+
+       unregister_framebuffer(info);
+       __s1d13xxxfb_remove(pdev);
        return 0;
 }
 
@@ -895,7 +901,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
        return 0;
 
 bail:
-       s1d13xxxfb_remove(pdev);
+       __s1d13xxxfb_remove(pdev);
        return ret;
 
 }
index 2d285cc384cfd8b6732daa5b573d5c4757d3a8f0..3e6e13f7a831968c59edc1e136733c3618f6cebf 100644 (file)
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
@@ -799,8 +799,8 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
        writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
        writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);
 
-       if (machine_is_shannon())
-               gpio_set_value(SHANNON_GPIO_DISP_EN, 1);
+       if (fbi->shannon_lcden)
+               gpiod_set_value(fbi->shannon_lcden, 1);
 
        dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
        dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
@@ -817,8 +817,8 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
 
        dev_dbg(fbi->dev, "Disabling LCD controller\n");
 
-       if (machine_is_shannon())
-               gpio_set_value(SHANNON_GPIO_DISP_EN, 0);
+       if (fbi->shannon_lcden)
+               gpiod_set_value(fbi->shannon_lcden, 0);
 
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1173,12 +1173,10 @@ static int sa1100fb_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (machine_is_shannon()) {
-               ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
-                       GPIOF_OUT_INIT_LOW, "display enable");
-               if (ret)
-                       return ret;
-       }
+       fbi->shannon_lcden = gpiod_get_optional(&pdev->dev, "shannon-lcden",
+                                               GPIOD_OUT_LOW);
+       if (IS_ERR(fbi->shannon_lcden))
+               return PTR_ERR(fbi->shannon_lcden);
 
        /* Initialize video memory */
        ret = sa1100fb_map_video_memory(fbi);
index d0aa33b0b88a8c07c0f94450fec8cdcf9458c4f0..b4363444fa5dd35b2453a212f80e069d2f04cc02 100644 (file)
@@ -10,6 +10,8 @@
  * for more details.
  */
 
+struct gpio_desc;
+
 #define LCCR0           0x0000          /* LCD Control Reg. 0 */
 #define LCSR            0x0004          /* LCD Status Reg. */
 #define DBAR1           0x0010          /* LCD DMA Base Address Reg. channel 1 */
@@ -33,6 +35,7 @@ struct sa1100fb_info {
        struct device           *dev;
        const struct sa1100fb_rgb *rgb[NR_RGB];
        void __iomem            *base;
+       struct gpio_desc        *shannon_lcden;
 
        /*
         * These are the addresses we mapped
index aba04afe712d02afdddc3c2dc7d1b736e2a7ae2e..3314d5b6b43ba6e0c812ebf551669aaee41da307 100644 (file)
@@ -21,7 +21,7 @@
 #ifdef SAVAGEFB_DEBUG
 # define DBG(x)                printk (KERN_DEBUG "savagefb: %s\n", (x));
 #else
-# define DBG(x)
+# define DBG(x)                no_printk(x)
 # define SavagePrintRegs(...)
 #endif
 
index 12fa1050f3eb9d54f2673e46425e6e53a0652707..8e06ba912d60adc8ef4dbbda480e2f348ae674c9 100644 (file)
@@ -12,8 +12,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
 #include <linux/pwm.h>
 #include <linux/uaccess.h>
 #include <linux/regulator/consumer.h>
@@ -49,8 +48,6 @@
 static u_int refreshrate = REFRESHRATE;
 module_param(refreshrate, uint, 0);
 
-struct ssd1307fb_par;
-
 struct ssd1307fb_deviceinfo {
        u32 default_vcomh;
        u32 default_dclk_div;
@@ -80,7 +77,6 @@ struct ssd1307fb_par {
        u32 prechargep1;
        u32 prechargep2;
        struct pwm_device *pwm;
-       u32 pwm_period;
        struct gpio_desc *reset;
        struct regulator *vbat_reg;
        u32 vcomh;
@@ -298,9 +294,9 @@ static void ssd1307fb_deferred_io(struct fb_info *info,
 
 static int ssd1307fb_init(struct ssd1307fb_par *par)
 {
+       struct pwm_state pwmstate;
        int ret;
        u32 precharge, dclk, com_invdir, compins;
-       struct pwm_args pargs;
 
        if (par->device_info->need_pwm) {
                par->pwm = pwm_get(&par->client->dev, NULL);
@@ -309,21 +305,15 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
                        return PTR_ERR(par->pwm);
                }
 
-               /*
-                * FIXME: pwm_apply_args() should be removed when switching to
-                * the atomic PWM API.
-                */
-               pwm_apply_args(par->pwm);
-
-               pwm_get_args(par->pwm, &pargs);
+               pwm_init_state(par->pwm, &pwmstate);
+               pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+               pwm_apply_state(par->pwm, &pwmstate);
 
-               par->pwm_period = pargs.period;
                /* Enable the PWM */
-               pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
                pwm_enable(par->pwm);
 
                dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n",
-                       par->pwm->pwm, par->pwm_period);
+                       par->pwm->pwm, pwm_get_period(par->pwm));
        }
 
        /* Set initial contrast */
@@ -586,25 +576,19 @@ static const struct of_device_id ssd1307fb_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ssd1307fb_of_match);
 
-static int ssd1307fb_probe(struct i2c_client *client,
-                          const struct i2c_device_id *id)
+static int ssd1307fb_probe(struct i2c_client *client)
 {
+       struct device *dev = &client->dev;
        struct backlight_device *bl;
        char bl_name[12];
        struct fb_info *info;
-       struct device_node *node = client->dev.of_node;
        struct fb_deferred_io *ssd1307fb_defio;
        u32 vmem_size;
        struct ssd1307fb_par *par;
        void *vmem;
        int ret;
 
-       if (!node) {
-               dev_err(&client->dev, "No device tree data found!\n");
-               return -EINVAL;
-       }
-
-       info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
+       info = framebuffer_alloc(sizeof(struct ssd1307fb_par), dev);
        if (!info)
                return -ENOMEM;
 
@@ -612,67 +596,65 @@ static int ssd1307fb_probe(struct i2c_client *client,
        par->info = info;
        par->client = client;
 
-       par->device_info = of_device_get_match_data(&client->dev);
+       par->device_info = device_get_match_data(dev);
 
-       par->reset = devm_gpiod_get_optional(&client->dev, "reset",
-                                            GPIOD_OUT_LOW);
+       par->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(par->reset)) {
-               dev_err(&client->dev, "failed to get reset gpio: %ld\n",
+               dev_err(dev, "failed to get reset gpio: %ld\n",
                        PTR_ERR(par->reset));
                ret = PTR_ERR(par->reset);
                goto fb_alloc_error;
        }
 
-       par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
+       par->vbat_reg = devm_regulator_get_optional(dev, "vbat");
        if (IS_ERR(par->vbat_reg)) {
                ret = PTR_ERR(par->vbat_reg);
                if (ret == -ENODEV) {
                        par->vbat_reg = NULL;
                } else {
-                       dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
-                               ret);
+                       dev_err(dev, "failed to get VBAT regulator: %d\n", ret);
                        goto fb_alloc_error;
                }
        }
 
-       if (of_property_read_u32(node, "solomon,width", &par->width))
+       if (device_property_read_u32(dev, "solomon,width", &par->width))
                par->width = 96;
 
-       if (of_property_read_u32(node, "solomon,height", &par->height))
+       if (device_property_read_u32(dev, "solomon,height", &par->height))
                par->height = 16;
 
-       if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset))
+       if (device_property_read_u32(dev, "solomon,page-offset", &par->page_offset))
                par->page_offset = 1;
 
-       if (of_property_read_u32(node, "solomon,com-offset", &par->com_offset))
+       if (device_property_read_u32(dev, "solomon,com-offset", &par->com_offset))
                par->com_offset = 0;
 
-       if (of_property_read_u32(node, "solomon,prechargep1", &par->prechargep1))
+       if (device_property_read_u32(dev, "solomon,prechargep1", &par->prechargep1))
                par->prechargep1 = 2;
 
-       if (of_property_read_u32(node, "solomon,prechargep2", &par->prechargep2))
+       if (device_property_read_u32(dev, "solomon,prechargep2", &par->prechargep2))
                par->prechargep2 = 2;
 
-       if (!of_property_read_u8_array(node, "solomon,lookup-table",
-                                      par->lookup_table,
-                                      ARRAY_SIZE(par->lookup_table)))
+       if (!device_property_read_u8_array(dev, "solomon,lookup-table",
+                                          par->lookup_table,
+                                          ARRAY_SIZE(par->lookup_table)))
                par->lookup_table_set = 1;
 
-       par->seg_remap = !of_property_read_bool(node, "solomon,segment-no-remap");
-       par->com_seq = of_property_read_bool(node, "solomon,com-seq");
-       par->com_lrremap = of_property_read_bool(node, "solomon,com-lrremap");
-       par->com_invdir = of_property_read_bool(node, "solomon,com-invdir");
+       par->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap");
+       par->com_seq = device_property_read_bool(dev, "solomon,com-seq");
+       par->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap");
+       par->com_invdir = device_property_read_bool(dev, "solomon,com-invdir");
        par->area_color_enable =
-               of_property_read_bool(node, "solomon,area-color-enable");
-       par->low_power = of_property_read_bool(node, "solomon,low-power");
+               device_property_read_bool(dev, "solomon,area-color-enable");
+       par->low_power = device_property_read_bool(dev, "solomon,low-power");
 
        par->contrast = 127;
        par->vcomh = par->device_info->default_vcomh;
 
        /* Setup display timing */
-       if (of_property_read_u32(node, "solomon,dclk-div", &par->dclk_div))
+       if (device_property_read_u32(dev, "solomon,dclk-div", &par->dclk_div))
                par->dclk_div = par->device_info->default_dclk_div;
-       if (of_property_read_u32(node, "solomon,dclk-frq", &par->dclk_frq))
+       if (device_property_read_u32(dev, "solomon,dclk-frq", &par->dclk_frq))
                par->dclk_frq = par->device_info->default_dclk_frq;
 
        vmem_size = DIV_ROUND_UP(par->width, 8) * par->height;
@@ -680,15 +662,15 @@ static int ssd1307fb_probe(struct i2c_client *client,
        vmem = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(vmem_size));
        if (!vmem) {
-               dev_err(&client->dev, "Couldn't allocate graphical memory.\n");
+               dev_err(dev, "Couldn't allocate graphical memory.\n");
                ret = -ENOMEM;
                goto fb_alloc_error;
        }
 
-       ssd1307fb_defio = devm_kzalloc(&client->dev, sizeof(*ssd1307fb_defio),
+       ssd1307fb_defio = devm_kzalloc(dev, sizeof(*ssd1307fb_defio),
                                       GFP_KERNEL);
        if (!ssd1307fb_defio) {
-               dev_err(&client->dev, "Couldn't allocate deferred io.\n");
+               dev_err(dev, "Couldn't allocate deferred io.\n");
                ret = -ENOMEM;
                goto fb_alloc_error;
        }
@@ -726,8 +708,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
        if (par->vbat_reg) {
                ret = regulator_enable(par->vbat_reg);
                if (ret) {
-                       dev_err(&client->dev, "failed to enable VBAT: %d\n",
-                               ret);
+                       dev_err(dev, "failed to enable VBAT: %d\n", ret);
                        goto reset_oled_error;
                }
        }
@@ -738,17 +719,16 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
        ret = register_framebuffer(info);
        if (ret) {
-               dev_err(&client->dev, "Couldn't register the framebuffer\n");
+               dev_err(dev, "Couldn't register the framebuffer\n");
                goto panel_init_error;
        }
 
        snprintf(bl_name, sizeof(bl_name), "ssd1307fb%d", info->node);
-       bl = backlight_device_register(bl_name, &client->dev, par,
-                                      &ssd1307fb_bl_ops, NULL);
+       bl = backlight_device_register(bl_name, dev, par, &ssd1307fb_bl_ops,
+                                      NULL);
        if (IS_ERR(bl)) {
                ret = PTR_ERR(bl);
-               dev_err(&client->dev, "unable to register backlight device: %d\n",
-                       ret);
+               dev_err(dev, "unable to register backlight device: %d\n", ret);
                goto bl_init_error;
        }
 
@@ -756,7 +736,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
        bl->props.max_brightness = MAX_CONTRAST;
        info->bl_dev = bl;
 
-       dev_info(&client->dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
+       dev_info(dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
 
        return 0;
 
@@ -810,7 +790,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = {
 MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
 
 static struct i2c_driver ssd1307fb_driver = {
-       .probe = ssd1307fb_probe,
+       .probe_new = ssd1307fb_probe,
        .remove = ssd1307fb_remove,
        .id_table = ssd1307fb_i2c_id,
        .driver = {
index 07905d385949ec3321df82c0b410ff9b75aa4fac..5b014b479f831b8468886ceacb1c1d70cae165fb 100644 (file)
@@ -64,9 +64,9 @@ static const struct usb_device_id id_table[] = {
 MODULE_DEVICE_TABLE(usb, id_table);
 
 /* module options */
-static bool console = 1; /* Allow fbcon to open framebuffer */
-static bool fb_defio = 1;  /* Detect mmap writes using page faults */
-static bool shadow = 1; /* Optionally disable shadow framebuffer */
+static bool console = true; /* Allow fbcon to open framebuffer */
+static bool fb_defio = true;  /* Detect mmap writes using page faults */
+static bool shadow = true; /* Optionally disable shadow framebuffer */
 static int pixel_limit; /* Optionally force a pixel resolution limit */
 
 struct dlfb_deferred_free {
index 53d08d1b56f553857bd7554097af47b72ed046a3..bee29aadc6460e40c73a4d3a6c38b7a34724ac1b 100644 (file)
@@ -45,7 +45,7 @@ static const struct fb_fix_screeninfo uvesafb_fix = {
 };
 
 static int mtrr                = 3;    /* enable mtrr by default */
-static bool blank      = 1;    /* enable blanking by default */
+static bool blank      = true; /* enable blanking by default */
 static int ypan                = 1;    /* 0: scroll, 1: ypan, 2: ywrap */
 static bool pmi_setpal = true; /* use PMI for palette changes */
 static bool nocrtc;            /* ignore CRTC settings */
@@ -1560,7 +1560,7 @@ static ssize_t uvesafb_show_vbe_modes(struct device *dev,
        int ret = 0, i;
 
        for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                        "%dx%d-%d, 0x%.4x\n",
                        par->vbe_modes[i].x_res, par->vbe_modes[i].y_res,
                        par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
@@ -1824,19 +1824,19 @@ static int uvesafb_setup(char *options)
                else if (!strcmp(this_opt, "ywrap"))
                        ypan = 2;
                else if (!strcmp(this_opt, "vgapal"))
-                       pmi_setpal = 0;
+                       pmi_setpal = false;
                else if (!strcmp(this_opt, "pmipal"))
-                       pmi_setpal = 1;
+                       pmi_setpal = true;
                else if (!strncmp(this_opt, "mtrr:", 5))
                        mtrr = simple_strtoul(this_opt+5, NULL, 0);
                else if (!strcmp(this_opt, "nomtrr"))
                        mtrr = 0;
                else if (!strcmp(this_opt, "nocrtc"))
-                       nocrtc = 1;
+                       nocrtc = true;
                else if (!strcmp(this_opt, "noedid"))
-                       noedid = 1;
+                       noedid = true;
                else if (!strcmp(this_opt, "noblank"))
-                       blank = 0;
+                       blank = false;
                else if (!strncmp(this_opt, "vtotal:", 7))
                        vram_total = simple_strtoul(this_opt + 7, NULL, 0);
                else if (!strncmp(this_opt, "vremap:", 7))
index 4d20c4603e5ae7529545c1b60654cddbda28b954..8425afe37d7c08890491eb72017fa32b88f71feb 100644 (file)
@@ -331,7 +331,7 @@ int __init valkyriefb_init(void)
                struct resource r;
 
                dp = of_find_node_by_name(NULL, "valkyrie");
-               if (dp == 0)
+               if (!dp)
                        return 0;
 
                if (of_address_to_resource(dp, 0, &r)) {
@@ -345,7 +345,7 @@ int __init valkyriefb_init(void)
 #endif /* ppc (!CONFIG_MAC) */
 
        p = kzalloc(sizeof(*p), GFP_ATOMIC);
-       if (p == 0)
+       if (!p)
                return -ENOMEM;
 
        /* Map in frame buffer and registers */
index a1fe24ea869b8b287bd93dd751743709f046b214..df6de5a9dd4cd9981ac11cc70e38e4309dde7cc0 100644 (file)
@@ -32,6 +32,7 @@
 struct vesafb_par {
        u32 pseudo_palette[256];
        int wc_cookie;
+       struct resource *region;
 };
 
 static struct fb_var_screeninfo vesafb_defined = {
@@ -411,7 +412,7 @@ static int vesafb_probe(struct platform_device *dev)
 
        /* request failure does not faze us, as vgacon probably has this
         * region already (FIXME) */
-       request_region(0x3c0, 32, "vesafb");
+       par->region = request_region(0x3c0, 32, "vesafb");
 
        if (mtrr == 3) {
                unsigned int temp_size = size_total;
@@ -439,7 +440,7 @@ static int vesafb_probe(struct platform_device *dev)
                       "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
                        vesafb_fix.smem_len, vesafb_fix.smem_start);
                err = -EIO;
-               goto err;
+               goto err_release_region;
        }
 
        printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
@@ -458,19 +459,22 @@ static int vesafb_probe(struct platform_device *dev)
 
        if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
                err = -ENOMEM;
-               goto err;
+               goto err_release_region;
        }
        if (register_framebuffer(info)<0) {
                err = -EINVAL;
                fb_dealloc_cmap(&info->cmap);
-               goto err;
+               goto err_release_region;
        }
        fb_info(info, "%s frame buffer device\n", info->fix.id);
        return 0;
-err:
+err_release_region:
        arch_phys_wc_del(par->wc_cookie);
        if (info->screen_base)
                iounmap(info->screen_base);
+       if (par->region)
+               release_region(0x3c0, 32);
+err:
        framebuffer_release(info);
        release_mem_region(vesafb_fix.smem_start, size_total);
        return err;
@@ -481,6 +485,8 @@ static int vesafb_remove(struct platform_device *pdev)
        struct fb_info *info = platform_get_drvdata(pdev);
 
        unregister_framebuffer(info);
+       if (((struct vesafb_par *)(info->par))->region)
+               release_region(0x3c0, 32);
        framebuffer_release(info);
 
        return 0;
index 6a320bd76936daede721ce44fa884f4a032e53fd..80fdfe4171c539409be29d86ecefa7f17453ac1b 100644 (file)
@@ -7,6 +7,8 @@
 #ifndef __DEBUG_H__
 #define __DEBUG_H__
 
+#include <linux/printk.h>
+
 #ifndef VIAFB_DEBUG
 #define VIAFB_DEBUG 0
 #endif
 #if VIAFB_DEBUG
 #define DEBUG_MSG(f, a...)   printk(f, ## a)
 #else
-#define DEBUG_MSG(f, a...)
+#define DEBUG_MSG(f, a...)   no_printk(f, ## a)
 #endif
 
 #define VIAFB_WARN 0
 #if VIAFB_WARN
 #define WARN_MSG(f, a...)   printk(f, ## a)
 #else
-#define WARN_MSG(f, a...)
+#define WARN_MSG(f, a...)   no_printk(f, ## a)
 #endif
 
 #endif /* __DEBUG_H__ */
index 852673c40a2f35d3100f4a9eaf48fa5dbc8c4baa..22deb340a0484fe394d96ac0c5c24a9b497de235 100644 (file)
@@ -1144,7 +1144,7 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
                if (value != NULL) {
                        if (kstrtou8(value, 0, &reg_val) < 0)
                                return -EINVAL;
-                       DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i,
+                       DEBUG_MSG(KERN_INFO "DVP0:reg_val[%lu]=:%x\n", i,
                                  reg_val);
                        switch (i) {
                        case 0:
index f744479dc7df63356af8f399ddabda40193cd748..c61476247ba8ddbbdc887acafef00eda91f7ff27 100644 (file)
@@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
                    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
                        for (i = 0; i < 256; i++)
                                vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+               fallthrough;
        case FB_BLANK_UNBLANK:
                if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
                    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
index 2d6e2738b792c584abefe7492881ab47027f4c38..d96ab28f8ce4ae54b384c3a4d8f2b9bfa085187e 100644 (file)
@@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
                memsize=par->mach->mem->size;
                memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
                vfree(par->saved_extmem);
+               par->saved_extmem = NULL;
        }
        if (par->saved_intmem) {
                memsize=MEM_INT_SIZE;
@@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
                else
                        memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
                vfree(par->saved_intmem);
+               par->saved_intmem = NULL;
        }
 }
 
index 53e04926a7b2d3ffb6748278550cbd845a34f3ea..190d26e2e75f9e7a8c5bf2fd35fe4c202d9380f3 100644 (file)
@@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd)
 {
        struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
 
+       writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
        writel_relaxed(0, wdt->base + WDTCONTROL);
        writel_relaxed(0, wdt->base + WDTLOAD);
        writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
 
+       /* Flush posted writes. */
+       readl_relaxed(wdt->base + WDTLOCK);
+
        return 0;
 }
 
index 385843256865e44b68d2d57071fb89ecedafad83..040d2a43e8e350924f259a3aaa8468b65edba15f 100644 (file)
@@ -448,7 +448,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
 {
-       return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+       int err;
+
+       err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+       /* Some hypervisors are buggy and can return 1. */
+       if (err > 0)
+               err = GNTST_general_error;
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 
index 5c794f4b051afcc953f97ceff3b562402a31b4e0..d1e1caa23c8b3fd4895473cc951772693ed77bfa 100644 (file)
@@ -1032,7 +1032,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        struct dentry *parent;
        struct inode *inode;
        struct key *key;
-       afs_dataversion_t dir_version;
+       afs_dataversion_t dir_version, invalid_before;
        long de_version;
        int ret;
 
@@ -1084,8 +1084,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        if (de_version == (long)dir_version)
                goto out_valid_noupdate;
 
-       dir_version = dir->invalid_before;
-       if (de_version - (long)dir_version >= 0)
+       invalid_before = dir->invalid_before;
+       if (de_version - (long)invalid_before >= 0)
                goto out_valid;
 
        _debug("dir modified");
@@ -1275,6 +1275,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct afs_fs_cursor fc;
        struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct key *key;
+       afs_dataversion_t data_version;
        int ret;
 
        mode |= S_IFDIR;
@@ -1295,7 +1296,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t data_version = dvnode->status.data_version + 1;
+               data_version = dvnode->status.data_version + 1;
 
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
@@ -1316,10 +1317,14 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                goto error_key;
        }
 
-       if (ret == 0 &&
-           test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
-               afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
-                                afs_edit_dir_for_create);
+       if (ret == 0) {
+               down_write(&dvnode->validate_lock);
+               if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+                   dvnode->status.data_version == data_version)
+                       afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
+                                        afs_edit_dir_for_create);
+               up_write(&dvnode->validate_lock);
+       }
 
        key_put(key);
        kfree(scb);
@@ -1360,6 +1365,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
        struct afs_fs_cursor fc;
        struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
        struct key *key;
+       afs_dataversion_t data_version;
        int ret;
 
        _enter("{%llx:%llu},{%pd}",
@@ -1391,7 +1397,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t data_version = dvnode->status.data_version + 1;
+               data_version = dvnode->status.data_version + 1;
 
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
@@ -1404,9 +1410,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
                ret = afs_end_vnode_operation(&fc);
                if (ret == 0) {
                        afs_dir_remove_subdir(dentry);
-                       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+                       down_write(&dvnode->validate_lock);
+                       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+                           dvnode->status.data_version == data_version)
                                afs_edit_dir_remove(dvnode, &dentry->d_name,
                                                    afs_edit_dir_for_rmdir);
+                       up_write(&dvnode->validate_lock);
                }
        }
 
@@ -1544,10 +1553,15 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
                ret = afs_end_vnode_operation(&fc);
                if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
                        ret = afs_dir_remove_link(dvnode, dentry, key);
-               if (ret == 0 &&
-                   test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
-                       afs_edit_dir_remove(dvnode, &dentry->d_name,
-                                           afs_edit_dir_for_unlink);
+
+               if (ret == 0) {
+                       down_write(&dvnode->validate_lock);
+                       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+                           dvnode->status.data_version == data_version)
+                               afs_edit_dir_remove(dvnode, &dentry->d_name,
+                                                   afs_edit_dir_for_unlink);
+                       up_write(&dvnode->validate_lock);
+               }
        }
 
        if (need_rehash && ret < 0 && ret != -ENOENT)
@@ -1573,6 +1587,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        struct afs_status_cb *scb;
        struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct key *key;
+       afs_dataversion_t data_version;
        int ret;
 
        mode |= S_IFREG;
@@ -1597,7 +1612,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t data_version = dvnode->status.data_version + 1;
+               data_version = dvnode->status.data_version + 1;
 
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
@@ -1618,9 +1633,12 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                goto error_key;
        }
 
-       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+       down_write(&dvnode->validate_lock);
+       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+           dvnode->status.data_version == data_version)
                afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
                                 afs_edit_dir_for_create);
+       up_write(&dvnode->validate_lock);
 
        kfree(scb);
        key_put(key);
@@ -1648,6 +1666,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
        struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct afs_vnode *vnode = AFS_FS_I(d_inode(from));
        struct key *key;
+       afs_dataversion_t data_version;
        int ret;
 
        _enter("{%llx:%llu},{%llx:%llu},{%pd}",
@@ -1672,7 +1691,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t data_version = dvnode->status.data_version + 1;
+               data_version = dvnode->status.data_version + 1;
 
                if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
                        afs_end_vnode_operation(&fc);
@@ -1702,9 +1721,12 @@ static int afs_link(struct dentry *from, struct inode *dir,
                goto error_key;
        }
 
-       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+       down_write(&dvnode->validate_lock);
+       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+           dvnode->status.data_version == data_version)
                afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid,
                                 afs_edit_dir_for_link);
+       up_write(&dvnode->validate_lock);
 
        key_put(key);
        kfree(scb);
@@ -1732,6 +1754,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
        struct afs_status_cb *scb;
        struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct key *key;
+       afs_dataversion_t data_version;
        int ret;
 
        _enter("{%llx:%llu},{%pd},%s",
@@ -1759,7 +1782,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t data_version = dvnode->status.data_version + 1;
+               data_version = dvnode->status.data_version + 1;
 
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
@@ -1780,9 +1803,12 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
                goto error_key;
        }
 
-       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+       down_write(&dvnode->validate_lock);
+       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+           dvnode->status.data_version == data_version)
                afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
                                 afs_edit_dir_for_symlink);
+       up_write(&dvnode->validate_lock);
 
        key_put(key);
        kfree(scb);
@@ -1812,6 +1838,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *tmp = NULL, *rehash = NULL;
        struct inode *new_inode;
        struct key *key;
+       afs_dataversion_t orig_data_version;
+       afs_dataversion_t new_data_version;
        bool new_negative = d_is_negative(new_dentry);
        int ret;
 
@@ -1890,10 +1918,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
-               afs_dataversion_t orig_data_version;
-               afs_dataversion_t new_data_version;
-               struct afs_status_cb *new_scb = &scb[1];
-
                orig_data_version = orig_dvnode->status.data_version + 1;
 
                if (orig_dvnode != new_dvnode) {
@@ -1904,7 +1928,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        new_data_version = new_dvnode->status.data_version + 1;
                } else {
                        new_data_version = orig_data_version;
-                       new_scb = &scb[0];
                }
 
                while (afs_select_fileserver(&fc)) {
@@ -1912,7 +1935,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
                        afs_fs_rename(&fc, old_dentry->d_name.name,
                                      new_dvnode, new_dentry->d_name.name,
-                                     &scb[0], new_scb);
+                                     &scb[0], &scb[1]);
                }
 
                afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break,
@@ -1930,18 +1953,25 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (ret == 0) {
                if (rehash)
                        d_rehash(rehash);
-               if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags))
-                   afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
-                                       afs_edit_dir_for_rename_0);
+               down_write(&orig_dvnode->validate_lock);
+               if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+                   orig_dvnode->status.data_version == orig_data_version)
+                       afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
+                                           afs_edit_dir_for_rename_0);
+               if (orig_dvnode != new_dvnode) {
+                       up_write(&orig_dvnode->validate_lock);
 
-               if (!new_negative &&
-                   test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
-                       afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
-                                           afs_edit_dir_for_rename_1);
+                       down_write(&new_dvnode->validate_lock);
+               }
+               if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
+                   new_dvnode->status.data_version == new_data_version) {
+                       if (!new_negative)
+                               afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
+                                                   afs_edit_dir_for_rename_1);
 
-               if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
                        afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
                                         &vnode->fid, afs_edit_dir_for_rename_2);
+               }
 
                new_inode = d_inode(new_dentry);
                if (new_inode) {
@@ -1957,14 +1987,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * Note that if we ever implement RENAME_EXCHANGE, we'll have
                 * to update both dentries with opposing dir versions.
                 */
-               if (new_dvnode != orig_dvnode) {
-                       afs_update_dentry_version(&fc, old_dentry, &scb[1]);
-                       afs_update_dentry_version(&fc, new_dentry, &scb[1]);
-               } else {
-                       afs_update_dentry_version(&fc, old_dentry, &scb[0]);
-                       afs_update_dentry_version(&fc, new_dentry, &scb[0]);
-               }
+               afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+               afs_update_dentry_version(&fc, new_dentry, &scb[1]);
                d_move(old_dentry, new_dentry);
+               up_write(&new_dvnode->validate_lock);
                goto error_tmp;
        }
 
index 361088a5edb98cc2fe75062b85f0daf587c4f904..d94e2b7cddff0bfac21bf01333b177ad752ad447 100644 (file)
@@ -21,6 +21,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
 {
        struct afs_fs_cursor fc;
        struct afs_status_cb *scb;
+       afs_dataversion_t dir_data_version;
        int ret = -ERESTARTSYS;
 
        _enter("%pd,%pd", old, new);
@@ -31,7 +32,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
 
        trace_afs_silly_rename(vnode, false);
        if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
-               afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
+               dir_data_version = dvnode->status.data_version + 1;
 
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
@@ -54,12 +55,15 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
                        dvnode->silly_key = key_get(key);
                }
 
-               if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+               down_write(&dvnode->validate_lock);
+               if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+                   dvnode->status.data_version == dir_data_version) {
                        afs_edit_dir_remove(dvnode, &old->d_name,
                                            afs_edit_dir_for_silly_0);
-               if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
                        afs_edit_dir_add(dvnode, &new->d_name,
                                         &vnode->fid, afs_edit_dir_for_silly_1);
+               }
+               up_write(&dvnode->validate_lock);
        }
 
        kfree(scb);
@@ -181,10 +185,14 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
                                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
                        }
                }
-               if (ret == 0 &&
-                   test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
-                       afs_edit_dir_remove(dvnode, &dentry->d_name,
-                                           afs_edit_dir_for_unlink);
+               if (ret == 0) {
+                       down_write(&dvnode->validate_lock);
+                       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+                           dvnode->status.data_version == dir_data_version)
+                               afs_edit_dir_remove(dvnode, &dentry->d_name,
+                                                   afs_edit_dir_for_unlink);
+                       up_write(&dvnode->validate_lock);
+               }
        }
 
        kfree(scb);
index 1f9c5d8e6fe5538a0702d00824719bdb7cc90d02..68fc46634346a13270497bbcf87cfb0d99340d02 100644 (file)
@@ -65,6 +65,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
        bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
        u64 data_version, size;
        u32 type, abort_code;
+       int ret;
 
        abort_code = ntohl(xdr->abort_code);
 
@@ -78,7 +79,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
                         */
                        status->abort_code = abort_code;
                        scb->have_error = true;
-                       return 0;
+                       goto good;
                }
 
                pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
@@ -87,7 +88,8 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
 
        if (abort_code != 0 && inline_error) {
                status->abort_code = abort_code;
-               return 0;
+               scb->have_error = true;
+               goto good;
        }
 
        type = ntohl(xdr->type);
@@ -123,13 +125,16 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
        data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
        status->data_version = data_version;
        scb->have_status = true;
-
+good:
+       ret = 0;
+advance:
        *_bp = (const void *)*_bp + sizeof(*xdr);
-       return 0;
+       return ret;
 
 bad:
        xdr_dump_bad(*_bp);
-       return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+       ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+       goto advance;
 }
 
 static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
@@ -981,16 +986,16 @@ static int afs_deliver_fs_rename(struct afs_call *call)
        if (ret < 0)
                return ret;
 
-       /* unmarshall the reply once we've received all of it */
+       /* If the two dirs are the same, we have two copies of the same status
+        * report, so we just decode it twice.
+        */
        bp = call->buffer;
        ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
        if (ret < 0)
                return ret;
-       if (call->out_dir_scb != call->out_scb) {
-               ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
+       if (ret < 0)
+               return ret;
        xdr_decode_AFSVolSync(&bp, call->out_volsync);
 
        _leave(" = 0 [done]");
index a26126ac7bf1d17c3b0280c2a54e05609b5d6e0c..b5b45c57e1b1d3f94db891811b2f8e9cb457c837 100644 (file)
@@ -165,15 +165,15 @@ static void xdr_dump_bad(const __be32 *bp)
        int i;
 
        pr_notice("YFS XDR: Bad status record\n");
-       for (i = 0; i < 5 * 4 * 4; i += 16) {
+       for (i = 0; i < 6 * 4 * 4; i += 16) {
                memcpy(x, bp, 16);
                bp += 4;
                pr_notice("%03x: %08x %08x %08x %08x\n",
                          i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
        }
 
-       memcpy(x, bp, 4);
-       pr_notice("0x50: %08x\n", ntohl(x[0]));
+       memcpy(x, bp, 8);
+       pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1]));
 }
 
 /*
@@ -186,13 +186,14 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
        const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
        struct afs_file_status *status = &scb->status;
        u32 type;
+       int ret;
 
        status->abort_code = ntohl(xdr->abort_code);
        if (status->abort_code != 0) {
                if (status->abort_code == VNOVNODE)
                        status->nlink = 0;
                scb->have_error = true;
-               return 0;
+               goto good;
        }
 
        type = ntohl(xdr->type);
@@ -220,13 +221,16 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
        status->size            = xdr_to_u64(xdr->size);
        status->data_version    = xdr_to_u64(xdr->data_version);
        scb->have_status        = true;
-
+good:
+       ret = 0;
+advance:
        *_bp += xdr_size(xdr);
-       return 0;
+       return ret;
 
 bad:
        xdr_dump_bad(*_bp);
-       return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+       ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+       goto advance;
 }
 
 /*
@@ -1153,11 +1157,9 @@ static int yfs_deliver_fs_rename(struct afs_call *call)
        ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
        if (ret < 0)
                return ret;
-       if (call->out_dir_scb != call->out_scb) {
-               ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
+       if (ret < 0)
+               return ret;
 
        xdr_decode_YFSVolSync(&bp, call->out_volsync);
        _leave(" = 0 [done]");
index 786849fcc319ade0c53d82338acffc626566ff98..47f66c6a7d7fca39d6d55b7338f2685ac6b0155e 100644 (file)
@@ -3370,6 +3370,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                            space_info->bytes_reserved > 0 ||
                            space_info->bytes_may_use > 0))
                        btrfs_dump_space_info(info, space_info, 0, 0);
+               WARN_ON(space_info->reclaim_size > 0);
                list_del(&space_info->list);
                btrfs_sysfs_remove_space_info(space_info);
        }
index 8a144f9cb7ac756fd1883568ab10b3bbb37b7e53..719e68ab552c52f3a2647d0b9ae8fcf38c9697a1 100644 (file)
@@ -2097,6 +2097,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
+       /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges and races between logging and
+        * completion of ordered extents for adjancent ranges - both races
+        * could lead to file extent items in the log with overlapping ranges.
+        * Do this while holding the inode lock, to avoid races with other
+        * tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
        /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
index d1973141d3bb819ed90d7aaa43f67f470ef40e1e..040009d1cc317f99f41e5a98a729714ce256fb51 100644 (file)
@@ -264,6 +264,7 @@ copy_inline_extent:
                            size);
        inode_add_bytes(dst, datal);
        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
+       ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
 out:
        if (!ret && !trans) {
                /*
index f65595602aa87712e3a6e43c2cc0095941c328a9..d35936c934ab0dacc8623b800c3843fc09a7cca3 100644 (file)
@@ -611,8 +611,8 @@ static int should_ignore_root(struct btrfs_root *root)
        if (!reloc_root)
                return 0;
 
-       if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
-           root->fs_info->running_transaction->transid - 1)
+       if (btrfs_header_generation(reloc_root->commit_root) ==
+           root->fs_info->running_transaction->transid)
                return 0;
        /*
         * if there is reloc tree and it was created in previous
@@ -1527,8 +1527,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
        int clear_rsv = 0;
        int ret;
 
-       if (!rc || !rc->create_reloc_tree ||
-           root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+       if (!rc)
                return 0;
 
        /*
@@ -1538,12 +1537,28 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
        if (reloc_root_is_dead(root))
                return 0;
 
+       /*
+        * This is subtle but important.  We do not do
+        * record_root_in_transaction for reloc roots, instead we record their
+        * corresponding fs root, and then here we update the last trans for the
+        * reloc root.  This means that we have to do this for the entire life
+        * of the reloc root, regardless of which stage of the relocation we are
+        * in.
+        */
        if (root->reloc_root) {
                reloc_root = root->reloc_root;
                reloc_root->last_trans = trans->transid;
                return 0;
        }
 
+       /*
+        * We are merging reloc roots, we do not need new reloc trees.  Also
+        * reloc trees never need their own reloc tree.
+        */
+       if (!rc->create_reloc_tree ||
+           root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+               return 0;
+
        if (!trans->reloc_reserved) {
                rsv = trans->block_rsv;
                trans->block_rsv = rc->block_rsv;
index 8b0fe053a25dadc5bc836f58a65e1c607d48340e..ff17a44203588c8ea98f7bbbbc35c8a5fd73431d 100644 (file)
@@ -361,6 +361,16 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+static void remove_ticket(struct btrfs_space_info *space_info,
+                         struct reserve_ticket *ticket)
+{
+       if (!list_empty(&ticket->list)) {
+               list_del_init(&ticket->list);
+               ASSERT(space_info->reclaim_size >= ticket->bytes);
+               space_info->reclaim_size -= ticket->bytes;
+       }
+}
+
 /*
  * This is for space we already have accounted in space_info->bytes_may_use, so
  * basically when we're returning space from block_rsv's.
@@ -388,9 +398,7 @@ again:
                        btrfs_space_info_update_bytes_may_use(fs_info,
                                                              space_info,
                                                              ticket->bytes);
-                       list_del_init(&ticket->list);
-                       ASSERT(space_info->reclaim_size >= ticket->bytes);
-                       space_info->reclaim_size -= ticket->bytes;
+                       remove_ticket(space_info, ticket);
                        ticket->bytes = 0;
                        space_info->tickets_id++;
                        wake_up(&ticket->wait);
@@ -899,7 +907,7 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                        btrfs_info(fs_info, "failing ticket with %llu bytes",
                                   ticket->bytes);
 
-               list_del_init(&ticket->list);
+               remove_ticket(space_info, ticket);
                ticket->error = -ENOSPC;
                wake_up(&ticket->wait);
 
@@ -1063,7 +1071,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
                         * despite getting an error, resulting in a space leak
                         * (bytes_may_use counter of our space_info).
                         */
-                       list_del_init(&ticket->list);
+                       remove_ticket(space_info, ticket);
                        ticket->error = -EINTR;
                        break;
                }
@@ -1121,7 +1129,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
                 * either the async reclaim job deletes the ticket from the list
                 * or we delete it ourselves at wait_reserve_ticket().
                 */
-               list_del_init(&ticket->list);
+               remove_ticket(space_info, ticket);
                if (!ret)
                        ret = -ENOSPC;
        }
index 58c111474ba5bb5d0630bea17f85d1ad6927ddb6..ec36a7c6ba3de86b65b54739d71ecc47a5e42c4b 100644 (file)
@@ -96,8 +96,8 @@ enum {
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct btrfs_inode *inode,
                           int inode_only,
-                          u64 start,
-                          u64 end,
+                          const loff_t start,
+                          const loff_t end,
                           struct btrfs_log_ctx *ctx);
 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
@@ -4533,15 +4533,13 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_inode *inode,
-                          struct btrfs_path *path,
-                          const u64 start,
-                          const u64 end)
+                          struct btrfs_path *path)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        const u64 ino = btrfs_ino(inode);
        const u64 i_size = i_size_read(&inode->vfs_inode);
-       u64 prev_extent_end = start;
+       u64 prev_extent_end = 0;
        int ret;
 
        if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
@@ -4549,21 +4547,14 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
 
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
-       key.offset = start;
+       key.offset = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;
 
-       if (ret > 0 && path->slots[0] > 0) {
-               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
-               if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
-                       path->slots[0]--;
-       }
-
        while (true) {
                struct extent_buffer *leaf = path->nodes[0];
-               u64 extent_end;
 
                if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                        ret = btrfs_next_leaf(root, path);
@@ -4580,18 +4571,9 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
                        break;
 
-               extent_end = btrfs_file_extent_end(path);
-               if (extent_end <= start)
-                       goto next_slot;
-
                /* We have a hole, log it. */
                if (prev_extent_end < key.offset) {
-                       u64 hole_len;
-
-                       if (key.offset >= end)
-                               hole_len = end - prev_extent_end;
-                       else
-                               hole_len = key.offset - prev_extent_end;
+                       const u64 hole_len = key.offset - prev_extent_end;
 
                        /*
                         * Release the path to avoid deadlocks with other code
@@ -4621,20 +4603,16 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                        leaf = path->nodes[0];
                }
 
-               prev_extent_end = min(extent_end, end);
-               if (extent_end >= end)
-                       break;
-next_slot:
+               prev_extent_end = btrfs_file_extent_end(path);
                path->slots[0]++;
                cond_resched();
        }
 
-       if (prev_extent_end < end && prev_extent_end < i_size) {
+       if (prev_extent_end < i_size) {
                u64 hole_len;
 
                btrfs_release_path(path);
-               hole_len = min(ALIGN(i_size, fs_info->sectorsize), end);
-               hole_len -= prev_extent_end;
+               hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
                ret = btrfs_insert_file_extent(trans, root->log_root,
                                               ino, prev_extent_end, 0, 0,
                                               hole_len, 0, hole_len,
@@ -4971,8 +4949,6 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
                                   const u64 logged_isize,
                                   const bool recursive_logging,
                                   const int inode_only,
-                                  const u64 start,
-                                  const u64 end,
                                   struct btrfs_log_ctx *ctx,
                                   bool *need_log_inode_item)
 {
@@ -4981,21 +4957,6 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
        int ins_nr = 0;
        int ret;
 
-       /*
-        * We must make sure we don't copy extent items that are entirely out of
-        * the range [start, end - 1]. This is not just an optimization to avoid
-        * copying but also needed to avoid a corruption where we end up with
-        * file extent items in the log tree that have overlapping ranges - this
-        * can happen if we race with ordered extent completion for ranges that
-        * are outside our target range. For example we copy an extent item and
-        * when we move to the next leaf, that extent was trimmed and a new one
-        * covering a subrange of it, but with a higher key, was inserted - we
-        * would then copy this other extent too, resulting in a log tree with
-        * 2 extent items that represent overlapping ranges.
-        *
-        * We can copy the entire extents at the range bondaries however, even
-        * if they cover an area outside the target range. That's ok.
-        */
        while (1) {
                ret = btrfs_search_forward(root, min_key, path, trans->transid);
                if (ret < 0)
@@ -5063,29 +5024,6 @@ again:
                        goto next_slot;
                }
 
-               if (min_key->type == BTRFS_EXTENT_DATA_KEY) {
-                       const u64 extent_end = btrfs_file_extent_end(path);
-
-                       if (extent_end <= start) {
-                               if (ins_nr > 0) {
-                                       ret = copy_items(trans, inode, dst_path,
-                                                        path, ins_start_slot,
-                                                        ins_nr, inode_only,
-                                                        logged_isize);
-                                       if (ret < 0)
-                                               return ret;
-                                       ins_nr = 0;
-                               }
-                               goto next_slot;
-                       }
-                       if (extent_end >= end) {
-                               ins_nr++;
-                               if (ins_nr == 1)
-                                       ins_start_slot = path->slots[0];
-                               break;
-                       }
-               }
-
                if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
                        ins_nr++;
                        goto next_slot;
@@ -5151,8 +5089,8 @@ next_key:
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct btrfs_inode *inode,
                           int inode_only,
-                          u64 start,
-                          u64 end,
+                          const loff_t start,
+                          const loff_t end,
                           struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5180,9 +5118,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       start = ALIGN_DOWN(start, fs_info->sectorsize);
-       end = ALIGN(end, fs_info->sectorsize);
-
        min_key.objectid = ino;
        min_key.type = BTRFS_INODE_ITEM_KEY;
        min_key.offset = 0;
@@ -5298,8 +5233,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 
        err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
                                      path, dst_path, logged_isize,
-                                     recursive_logging, inode_only,
-                                     start, end, ctx, &need_log_inode_item);
+                                     recursive_logging, inode_only, ctx,
+                                     &need_log_inode_item);
        if (err)
                goto out_unlock;
 
@@ -5312,7 +5247,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
                btrfs_release_path(path);
                btrfs_release_path(dst_path);
-               err = btrfs_log_holes(trans, root, inode, path, start, end);
+               err = btrfs_log_holes(trans, root, inode, path);
                if (err)
                        goto out_unlock;
        }
index f73276d746bbfc7f2c98bf057938cc5f6c5e8ef0..599a0bf7257bdbc735668a36c11371194cd82cd4 100644 (file)
@@ -1371,6 +1371,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 }
 EXPORT_SYMBOL(__breadahead);
 
+void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
+                     gfp_t gfp)
+{
+       struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
+       if (likely(bh)) {
+               ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+               brelse(bh);
+       }
+}
+EXPORT_SYMBOL(__breadahead_gfp);
+
 /**
  *  __bread_gfp() - reads a specified block and returns the bh
  *  @bdev: the block_device to read from
index d594c26274305ac44d46c647545b503c787232bc..4c4202c93b7151ea120cf49ba6545d8c7c8d9753 100644 (file)
@@ -1051,8 +1051,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
 
        /* If op failed, mark everyone involved for errors */
        if (result) {
-               int pathlen;
-               u64 base;
+               int pathlen = 0;
+               u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);
 
index 4a5ccbb7e808e40800f4133f898cece536a908b5..afdfca965a7fc59a12befb6731409e4155a6081f 100644 (file)
@@ -527,8 +527,8 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
 
        if (result) {
                struct dentry *dentry = req->r_dentry;
-               int pathlen;
-               u64 base;
+               int pathlen = 0;
+               u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);
 
index 4e5be79bf0803bdb647b5612db76676766999165..903d9edfd4bf203f83b654e3c328901fb23b3d1b 100644 (file)
@@ -521,7 +521,7 @@ extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
 
 static inline void ceph_mdsc_free_path(char *path, int len)
 {
-       if (path)
+       if (!IS_ERR_OR_NULL(path))
                __putname(path - (PATH_MAX - 1 - len));
 }
 
index 140efc1a937469e73ac801820625ad18ab01705d..182b864b3075bdd1f0b079f9047c8a0f4a5d796d 100644 (file)
@@ -594,6 +594,8 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
                               cifs_max_pending);
        set_credits(server, server->maxReq);
        server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
+       /* set up max_read for readpages check */
+       server->max_read = server->maxBuf;
        /* even though we do not use raw we might as well set this
        accurately, in case we ever find a need for it */
        if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -755,6 +757,8 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        set_credits(server, server->maxReq);
        /* probably no need to store and check maxvcs */
        server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
+       /* set up max_read for readpages check */
+       server->max_read = server->maxBuf;
        server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
        cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
        server->capabilities = le32_to_cpu(pSMBr->Capabilities);
index 8fbbdcdad8ffa89fda849927cec56fdb7666fda6..390d2b15ef6ef9d7014e90069cdad3d2880806d3 100644 (file)
@@ -61,7 +61,7 @@ static void cifs_set_ops(struct inode *inode)
                }
 
                /* check if server can support readpages */
-               if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
+               if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
                                PAGE_SIZE + MAX_CIFS_HDR_SIZE)
                        inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
                else
index 47d3e382ecaac269fd447a381dfa1fa56ca61e64..b30aa3cdd845f333304de9fcbdc95f1ae3ec5e99 100644 (file)
@@ -1552,6 +1552,21 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
        }
 
        rc = SMB2_sess_establish_session(sess_data);
+#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+       if (ses->server->dialect < SMB30_PROT_ID) {
+               cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
+               /*
+                * The session id is opaque in terms of endianness, so we can't
+                * print it as a long long. we dump it as we got it on the wire
+                */
+               cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
+                        &ses->Suid);
+               cifs_dbg(VFS, "Session Key   %*ph\n",
+                        SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
+               cifs_dbg(VFS, "Signing Key   %*ph\n",
+                        SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
+       }
+#endif
 out:
        kfree(ntlmssp_blob);
        SMB2_sess_free_buffer(sess_data);
index 1a6c227ada8f52597b9d87ae760659290a6502ab..c0348e3b1695828f0178c55d52bd4024b0e8b474 100644 (file)
@@ -660,8 +660,8 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
                return rc;
 
        if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
-               dump_stack();
-               cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n", shdr->Command, shdr->MessageId);
+               cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
+                       shdr->Command, shdr->MessageId);
                return -EACCES;
        } else
                return 0;
index 0e0a4d6209c7d71afa09da41a8cf0d7d45f5eeff..a32e5f7b53853ffce8c8249c0753ff3ad554d5b1 100644 (file)
@@ -410,7 +410,7 @@ verified:
  * Read the bitmap for a given block_group,and validate the
  * bits for block/inode/inode tables are set in the bitmaps
  *
- * Return buffer_head on success or NULL in case of failure.
+ * Return buffer_head on success or an ERR_PTR in case of failure.
  */
 struct buffer_head *
 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
@@ -502,7 +502,7 @@ out:
        return ERR_PTR(err);
 }
 
-/* Returns 0 on success, 1 on error */
+/* Returns 0 on success, -errno on error */
 int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
 {
index 7f16e1af8d5cc6fb371f98b64e9d0b127aec4ca7..0c76cdd44d90d02316114172da2ea8bd5120e6a9 100644 (file)
@@ -338,9 +338,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                if (inode && inode_needs_sync(inode)) {
                        sync_dirty_buffer(bh);
                        if (buffer_req(bh) && !buffer_uptodate(bh)) {
-                               struct ext4_super_block *es;
-
-                               es = EXT4_SB(inode->i_sb)->s_es;
                                ext4_error_inode_err(inode, where, line,
                                                     bh->b_blocknr, EIO,
                                        "IO error syncing itable block");
index 031752cfb6f753648d06d9dd3312ba9507116891..f2b577b315a09371210b180934f3c0019134756e 100644 (file)
@@ -3374,8 +3374,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                (unsigned long long)map->m_lblk, map_len);
 
        sbi = EXT4_SB(inode->i_sb);
-       eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
-               inode->i_sb->s_blocksize_bits;
+       eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+                       >> inode->i_sb->s_blocksize_bits;
        if (eof_block < map->m_lblk + map_len)
                eof_block = map->m_lblk + map_len;
 
@@ -3627,8 +3627,8 @@ static int ext4_split_convert_extents(handle_t *handle,
                  __func__, inode->i_ino,
                  (unsigned long long)map->m_lblk, map->m_len);
 
-       eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
-               inode->i_sb->s_blocksize_bits;
+       eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+                       >> inode->i_sb->s_blocksize_bits;
        if (eof_block < map->m_lblk + map->m_len)
                eof_block = map->m_lblk + map->m_len;
        /*
index b420c9dc444de454b0bbd298d31e0d7074d210fb..4b8c9a9bdf0c8c0e77a39aaa7e0bb653d4c64b40 100644 (file)
@@ -113,7 +113,7 @@ verified:
  * Read the inode allocation bitmap for a given block_group, reading
  * into the specified slot in the superblock's bitmap cache.
  *
- * Return buffer_head of bitmap on success or NULL.
+ * Return buffer_head of bitmap on success, or an ERR_PTR on error.
  */
 static struct buffer_head *
 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
@@ -662,7 +662,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
  * block has been written back to disk.  (Yes, these values are
  * somewhat arbitrary...)
  */
-#define RECENTCY_MIN   5
+#define RECENTCY_MIN   60
 #define RECENTCY_DIRTY 300
 
 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
index e416096fc081372d0047ebe9ed90c4a2038dab46..2a4aae6acdcb9e4acb79647baa24c218fb6cca2a 100644 (file)
@@ -1973,7 +1973,7 @@ static int ext4_writepage(struct page *page,
        bool keep_towrite = false;
 
        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
-               ext4_invalidatepage(page, 0, PAGE_SIZE);
+               inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
                unlock_page(page);
                return -EIO;
        }
@@ -4364,7 +4364,7 @@ make_io:
                        if (end > table)
                                end = table;
                        while (b <= end)
-                               sb_breadahead(sb, b++);
+                               sb_breadahead_unmovable(sb, b++);
                }
 
                /*
index 87c85be4c12e568d3d272cc8cf87f634ee13cf9a..30d5d97548c42117decfcca690f70b9b48dc490b 100644 (file)
@@ -1943,7 +1943,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
        int free;
 
        free = e4b->bd_info->bb_free;
-       BUG_ON(free <= 0);
+       if (WARN_ON(free <= 0))
+               return;
 
        i = e4b->bd_info->bb_first_free;
 
@@ -1966,7 +1967,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                }
 
                mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
-               BUG_ON(ex.fe_len <= 0);
+               if (WARN_ON(ex.fe_len <= 0))
+                       break;
                if (free < ex.fe_len) {
                        ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
                                        "%d free clusters as per "
index 9728e7b0e84fcae5b7f0d4753c1572dc08f813bb..bf5fcb477f667211f031c6f1cff42f54a1a5235d 100644 (file)
@@ -596,7 +596,6 @@ void __ext4_error_file(struct file *file, const char *function,
 {
        va_list args;
        struct va_format vaf;
-       struct ext4_super_block *es;
        struct inode *inode = file_inode(file);
        char pathname[80], *path;
 
@@ -604,7 +603,6 @@ void __ext4_error_file(struct file *file, const char *function,
                return;
 
        trace_ext4_error(inode->i_sb, function, line);
-       es = EXT4_SB(inode->i_sb)->s_es;
        if (ext4_error_ratelimit(inode->i_sb)) {
                path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
@@ -4340,7 +4338,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        /* Pre-read the descriptors into the buffer cache */
        for (i = 0; i < db_count; i++) {
                block = descriptor_loc(sb, logical_sb_block, i);
-               sb_breadahead(sb, block);
+               sb_breadahead_unmovable(sb, block);
        }
 
        for (i = 0; i < db_count; i++) {
index 5190bfb6a6657e6ead1ab2c7da97ad1dbfcc1e0e..381d50becd04a19d55f8953032238d93e839054b 100644 (file)
@@ -357,7 +357,6 @@ struct io_timeout_data {
        struct hrtimer                  timer;
        struct timespec64               ts;
        enum hrtimer_mode               mode;
-       u32                             seq_offset;
 };
 
 struct io_accept {
@@ -385,7 +384,7 @@ struct io_timeout {
        struct file                     *file;
        u64                             addr;
        int                             flags;
-       unsigned                        count;
+       u32                             count;
 };
 
 struct io_rw {
@@ -508,6 +507,7 @@ enum {
        REQ_F_FORCE_ASYNC_BIT   = IOSQE_ASYNC_BIT,
        REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
 
+       REQ_F_LINK_HEAD_BIT,
        REQ_F_LINK_NEXT_BIT,
        REQ_F_FAIL_LINK_BIT,
        REQ_F_INFLIGHT_BIT,
@@ -543,6 +543,8 @@ enum {
        /* IOSQE_BUFFER_SELECT */
        REQ_F_BUFFER_SELECT     = BIT(REQ_F_BUFFER_SELECT_BIT),
 
+       /* head of a link */
+       REQ_F_LINK_HEAD         = BIT(REQ_F_LINK_HEAD_BIT),
        /* already grabbed next link */
        REQ_F_LINK_NEXT         = BIT(REQ_F_LINK_NEXT_BIT),
        /* fail rest of links */
@@ -955,8 +957,8 @@ static inline bool __req_need_defer(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
-                                       + atomic_read(&ctx->cached_cq_overflow);
+       return req->sequence != ctx->cached_cq_tail
+                               + atomic_read(&ctx->cached_cq_overflow);
 }
 
 static inline bool req_need_defer(struct io_kiocb *req)
@@ -1437,7 +1439,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
        if (ret != -1) {
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(ctx);
-               req->flags &= ~REQ_F_LINK;
+               req->flags &= ~REQ_F_LINK_HEAD;
                io_put_req(req);
                return true;
        }
@@ -1473,7 +1475,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 
                list_del_init(&req->link_list);
                if (!list_empty(&nxt->link_list))
-                       nxt->flags |= REQ_F_LINK;
+                       nxt->flags |= REQ_F_LINK_HEAD;
                *nxtptr = nxt;
                break;
        }
@@ -1484,7 +1486,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 }
 
 /*
- * Called if REQ_F_LINK is set, and we fail the head request
+ * Called if REQ_F_LINK_HEAD is set, and we fail the head request
  */
 static void io_fail_links(struct io_kiocb *req)
 {
@@ -1517,7 +1519,7 @@ static void io_fail_links(struct io_kiocb *req)
 
 static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 {
-       if (likely(!(req->flags & REQ_F_LINK)))
+       if (likely(!(req->flags & REQ_F_LINK_HEAD)))
                return;
 
        /*
@@ -1669,7 +1671,7 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 
 static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
-       if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+       if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
                return false;
 
        if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
@@ -2562,7 +2564,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 
        req->result = 0;
        io_size = ret;
-       if (req->flags & REQ_F_LINK)
+       if (req->flags & REQ_F_LINK_HEAD)
                req->result = io_size;
 
        /*
@@ -2653,7 +2655,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 
        req->result = 0;
        io_size = ret;
-       if (req->flags & REQ_F_LINK)
+       if (req->flags & REQ_F_LINK_HEAD)
                req->result = io_size;
 
        /*
@@ -2760,7 +2762,7 @@ static bool io_splice_punt(struct file *file)
                return false;
        if (!io_file_supports_async(file))
                return true;
-       return !(file->f_mode & O_NONBLOCK);
+       return !(file->f_flags & O_NONBLOCK);
 }
 
 static int io_splice(struct io_kiocb *req, bool force_nonblock)
@@ -4153,20 +4155,57 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
        return 1;
 }
 
+static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
+       __acquires(&req->ctx->completion_lock)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+
+       if (!req->result && !READ_ONCE(poll->canceled)) {
+               struct poll_table_struct pt = { ._key = poll->events };
+
+               req->result = vfs_poll(req->file, &pt) & poll->events;
+       }
+
+       spin_lock_irq(&ctx->completion_lock);
+       if (!req->result && !READ_ONCE(poll->canceled)) {
+               add_wait_queue(poll->head, &poll->wait);
+               return true;
+       }
+
+       return false;
+}
+
 static void io_async_task_func(struct callback_head *cb)
 {
        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
        struct async_poll *apoll = req->apoll;
        struct io_ring_ctx *ctx = req->ctx;
+       bool canceled;
 
        trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
 
-       WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
+       if (io_poll_rewait(req, &apoll->poll)) {
+               spin_unlock_irq(&ctx->completion_lock);
+               return;
+       }
 
-       if (hash_hashed(&req->hash_node)) {
-               spin_lock_irq(&ctx->completion_lock);
+       if (hash_hashed(&req->hash_node))
                hash_del(&req->hash_node);
-               spin_unlock_irq(&ctx->completion_lock);
+
+       canceled = READ_ONCE(apoll->poll.canceled);
+       if (canceled) {
+               io_cqring_fill_event(req, -ECANCELED);
+               io_commit_cqring(ctx);
+       }
+
+       spin_unlock_irq(&ctx->completion_lock);
+
+       if (canceled) {
+               kfree(apoll);
+               io_cqring_ev_posted(ctx);
+               req_set_fail_links(req);
+               io_put_req(req);
+               return;
        }
 
        /* restore ->work in case we need to retry again */
@@ -4315,11 +4354,13 @@ static bool __io_poll_remove_one(struct io_kiocb *req,
 
 static bool io_poll_remove_one(struct io_kiocb *req)
 {
+       struct async_poll *apoll = NULL;
        bool do_complete;
 
        if (req->opcode == IORING_OP_POLL_ADD) {
                do_complete = __io_poll_remove_one(req, &req->poll);
        } else {
+               apoll = req->apoll;
                /* non-poll requests have submit ref still */
                do_complete = __io_poll_remove_one(req, &req->apoll->poll);
                if (do_complete)
@@ -4328,6 +4369,14 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 
        hash_del(&req->hash_node);
 
+       if (apoll) {
+               /*
+                * restore ->work because we need to call io_req_work_drop_env.
+                */
+               memcpy(&req->work, &apoll->work, sizeof(req->work));
+               kfree(apoll);
+       }
+
        if (do_complete) {
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(req->ctx);
@@ -4342,7 +4391,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
 {
        struct hlist_node *tmp;
        struct io_kiocb *req;
-       int i;
+       int posted = 0, i;
 
        spin_lock_irq(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
@@ -4350,11 +4399,12 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
 
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node)
-                       io_poll_remove_one(req);
+                       posted += io_poll_remove_one(req);
        }
        spin_unlock_irq(&ctx->completion_lock);
 
-       io_cqring_ev_posted(ctx);
+       if (posted)
+               io_cqring_ev_posted(ctx);
 }
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
@@ -4423,18 +4473,11 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
        struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_iocb *poll = &req->poll;
 
-       if (!req->result && !READ_ONCE(poll->canceled)) {
-               struct poll_table_struct pt = { ._key = poll->events };
-
-               req->result = vfs_poll(req->file, &pt) & poll->events;
-       }
-
-       spin_lock_irq(&ctx->completion_lock);
-       if (!req->result && !READ_ONCE(poll->canceled)) {
-               add_wait_queue(poll->head, &poll->wait);
+       if (io_poll_rewait(req, poll)) {
                spin_unlock_irq(&ctx->completion_lock);
                return;
        }
+
        hash_del(&req->hash_node);
        io_poll_complete(req, req->result, 0);
        req->flags |= REQ_F_COMP_LOCKED;
@@ -4665,11 +4708,12 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 static int io_timeout(struct io_kiocb *req)
 {
-       unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data;
        struct list_head *entry;
        unsigned span = 0;
+       u32 count = req->timeout.count;
+       u32 seq = req->sequence;
 
        data = &req->io->timeout;
 
@@ -4678,7 +4722,6 @@ static int io_timeout(struct io_kiocb *req)
         * timeout event to be satisfied. If it isn't set, then this is
         * a pure timeout request, sequence isn't used.
         */
-       count = req->timeout.count;
        if (!count) {
                req->flags |= REQ_F_TIMEOUT_NOSEQ;
                spin_lock_irq(&ctx->completion_lock);
@@ -4686,8 +4729,7 @@ static int io_timeout(struct io_kiocb *req)
                goto add;
        }
 
-       req->sequence = ctx->cached_sq_head + count - 1;
-       data->seq_offset = count;
+       req->sequence = seq + count;
 
        /*
         * Insertion sort, ensuring the first entry in the list is always
@@ -4696,26 +4738,26 @@ static int io_timeout(struct io_kiocb *req)
        spin_lock_irq(&ctx->completion_lock);
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-               unsigned nxt_sq_head;
+               unsigned nxt_seq;
                long long tmp, tmp_nxt;
-               u32 nxt_offset = nxt->io->timeout.seq_offset;
+               u32 nxt_offset = nxt->timeout.count;
 
                if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
                        continue;
 
                /*
-                * Since cached_sq_head + count - 1 can overflow, use type long
+                * Since seq + count can overflow, use type long
                 * long to store it.
                 */
-               tmp = (long long)ctx->cached_sq_head + count - 1;
-               nxt_sq_head = nxt->sequence - nxt_offset + 1;
-               tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
+               tmp = (long long)seq + count;
+               nxt_seq = nxt->sequence - nxt_offset;
+               tmp_nxt = (long long)nxt_seq + nxt_offset;
 
                /*
                 * cached_sq_head may overflow, and it will never overflow twice
                 * once there is some timeout req still be valid.
                 */
-               if (ctx->cached_sq_head < nxt_sq_head)
+               if (seq < nxt_seq)
                        tmp += UINT_MAX;
 
                if (tmp > tmp_nxt)
@@ -5476,7 +5518,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 {
        struct io_kiocb *nxt;
 
-       if (!(req->flags & REQ_F_LINK))
+       if (!(req->flags & REQ_F_LINK_HEAD))
                return NULL;
        /* for polled retry, if flag is set, we already went through here */
        if (req->flags & REQ_F_POLLED)
@@ -5604,54 +5646,11 @@ static inline void io_queue_link_head(struct io_kiocb *req)
                io_queue_sqe(req, NULL);
 }
 
-#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
-                               IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
-                               IOSQE_BUFFER_SELECT)
-
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                          struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       unsigned int sqe_flags;
-       int ret, id, fd;
-
-       sqe_flags = READ_ONCE(sqe->flags);
-
-       /* enforce forwards compatibility on users */
-       if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
-               ret = -EINVAL;
-               goto err_req;
-       }
-
-       if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-           !io_op_defs[req->opcode].buffer_select) {
-               ret = -EOPNOTSUPP;
-               goto err_req;
-       }
-
-       id = READ_ONCE(sqe->personality);
-       if (id) {
-               req->work.creds = idr_find(&ctx->personality_idr, id);
-               if (unlikely(!req->work.creds)) {
-                       ret = -EINVAL;
-                       goto err_req;
-               }
-               get_cred(req->work.creds);
-       }
-
-       /* same numerical values with corresponding REQ_F_*, safe to copy */
-       req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
-                                       IOSQE_ASYNC | IOSQE_FIXED_FILE |
-                                       IOSQE_BUFFER_SELECT);
-
-       fd = READ_ONCE(sqe->fd);
-       ret = io_req_set_file(state, req, fd, sqe_flags);
-       if (unlikely(ret)) {
-err_req:
-               io_cqring_add_event(req, ret);
-               io_double_put_req(req);
-               return false;
-       }
+       int ret;
 
        /*
         * If we already have a head request, queue this one for async
@@ -5670,42 +5669,39 @@ err_req:
                 * next after the link request. The last one is done via
                 * drain_next flag to persist the effect across calls.
                 */
-               if (sqe_flags & IOSQE_IO_DRAIN) {
+               if (req->flags & REQ_F_IO_DRAIN) {
                        head->flags |= REQ_F_IO_DRAIN;
                        ctx->drain_next = 1;
                }
-               if (io_alloc_async_ctx(req)) {
-                       ret = -EAGAIN;
-                       goto err_req;
-               }
+               if (io_alloc_async_ctx(req))
+                       return -EAGAIN;
 
                ret = io_req_defer_prep(req, sqe);
                if (ret) {
                        /* fail even hard links since we don't submit */
                        head->flags |= REQ_F_FAIL_LINK;
-                       goto err_req;
+                       return ret;
                }
                trace_io_uring_link(ctx, req, head);
                list_add_tail(&req->link_list, &head->link_list);
 
                /* last request of a link, enqueue the link */
-               if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
+               if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
                        io_queue_link_head(head);
                        *link = NULL;
                }
        } else {
                if (unlikely(ctx->drain_next)) {
                        req->flags |= REQ_F_IO_DRAIN;
-                       req->ctx->drain_next = 0;
+                       ctx->drain_next = 0;
                }
-               if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
-                       req->flags |= REQ_F_LINK;
+               if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+                       req->flags |= REQ_F_LINK_HEAD;
                        INIT_LIST_HEAD(&req->link_list);
 
-                       if (io_alloc_async_ctx(req)) {
-                               ret = -EAGAIN;
-                               goto err_req;
-                       }
+                       if (io_alloc_async_ctx(req))
+                               return -EAGAIN;
+
                        ret = io_req_defer_prep(req, sqe);
                        if (ret)
                                req->flags |= REQ_F_FAIL_LINK;
@@ -5715,7 +5711,7 @@ err_req:
                }
        }
 
-       return true;
+       return 0;
 }
 
 /*
@@ -5789,15 +5785,23 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
        ctx->cached_sq_head++;
 }
 
-static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       const struct io_uring_sqe *sqe)
+#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+                               IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
+                               IOSQE_BUFFER_SELECT)
+
+static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                      const struct io_uring_sqe *sqe,
+                      struct io_submit_state *state, bool async)
 {
+       unsigned int sqe_flags;
+       int id, fd;
+
        /*
         * All io need record the previous position, if LINK vs DARIN,
         * it can be used to mark the position of the first IO in the
         * link list.
         */
-       req->sequence = ctx->cached_sq_head;
+       req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
        req->opcode = READ_ONCE(sqe->opcode);
        req->user_data = READ_ONCE(sqe->user_data);
        req->io = NULL;
@@ -5808,17 +5812,50 @@ static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        refcount_set(&req->refs, 2);
        req->task = NULL;
        req->result = 0;
+       req->needs_fixed_file = async;
        INIT_IO_WORK(&req->work, io_wq_submit_work);
+
+       if (unlikely(req->opcode >= IORING_OP_LAST))
+               return -EINVAL;
+
+       if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+               if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+                       return -EFAULT;
+               use_mm(ctx->sqo_mm);
+       }
+
+       sqe_flags = READ_ONCE(sqe->flags);
+       /* enforce forwards compatibility on users */
+       if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+               return -EINVAL;
+
+       if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+           !io_op_defs[req->opcode].buffer_select)
+               return -EOPNOTSUPP;
+
+       id = READ_ONCE(sqe->personality);
+       if (id) {
+               req->work.creds = idr_find(&ctx->personality_idr, id);
+               if (unlikely(!req->work.creds))
+                       return -EINVAL;
+               get_cred(req->work.creds);
+       }
+
+       /* same numerical values with corresponding REQ_F_*, safe to copy */
+       req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
+                                       IOSQE_ASYNC | IOSQE_FIXED_FILE |
+                                       IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
+
+       fd = READ_ONCE(sqe->fd);
+       return io_req_set_file(state, req, fd, sqe_flags);
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-                         struct file *ring_file, int ring_fd,
-                         struct mm_struct **mm, bool async)
+                         struct file *ring_file, int ring_fd, bool async)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
        int i, submitted = 0;
-       bool mm_fault = false;
 
        /* if we have a backlog and couldn't flush it all, return BUSY */
        if (test_bit(0, &ctx->sq_check_overflow)) {
@@ -5858,34 +5895,23 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                        break;
                }
 
-               io_init_req(ctx, req, sqe);
+               err = io_init_req(ctx, req, sqe, statep, async);
                io_consume_sqe(ctx);
                /* will complete beyond this point, count as submitted */
                submitted++;
 
-               if (unlikely(req->opcode >= IORING_OP_LAST)) {
-                       err = -EINVAL;
+               if (unlikely(err)) {
 fail_req:
                        io_cqring_add_event(req, err);
                        io_double_put_req(req);
                        break;
                }
 
-               if (io_op_defs[req->opcode].needs_mm && !*mm) {
-                       mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
-                       if (unlikely(mm_fault)) {
-                               err = -EFAULT;
-                               goto fail_req;
-                       }
-                       use_mm(ctx->sqo_mm);
-                       *mm = ctx->sqo_mm;
-               }
-
-               req->needs_fixed_file = async;
                trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
                                                true, async);
-               if (!io_submit_sqe(req, sqe, statep, &link))
-                       break;
+               err = io_submit_sqe(req, sqe, statep, &link);
+               if (err)
+                       goto fail_req;
        }
 
        if (unlikely(submitted != nr)) {
@@ -5904,10 +5930,19 @@ fail_req:
        return submitted;
 }
 
+static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+       struct mm_struct *mm = current->mm;
+
+       if (mm) {
+               unuse_mm(mm);
+               mmput(mm);
+       }
+}
+
 static int io_sq_thread(void *data)
 {
        struct io_ring_ctx *ctx = data;
-       struct mm_struct *cur_mm = NULL;
        const struct cred *old_cred;
        mm_segment_t old_fs;
        DEFINE_WAIT(wait);
@@ -5948,11 +5983,7 @@ static int io_sq_thread(void *data)
                         * adding ourselves to the waitqueue, as the unuse/drop
                         * may sleep.
                         */
-                       if (cur_mm) {
-                               unuse_mm(cur_mm);
-                               mmput(cur_mm);
-                               cur_mm = NULL;
-                       }
+                       io_sq_thread_drop_mm(ctx);
 
                        /*
                         * We're polling. If we're within the defined idle
@@ -6016,7 +6047,7 @@ static int io_sq_thread(void *data)
                }
 
                mutex_lock(&ctx->uring_lock);
-               ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+               ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
                mutex_unlock(&ctx->uring_lock);
                timeout = jiffies + ctx->sq_thread_idle;
        }
@@ -6025,10 +6056,7 @@ static int io_sq_thread(void *data)
                task_work_run();
 
        set_fs(old_fs);
-       if (cur_mm) {
-               unuse_mm(cur_mm);
-               mmput(cur_mm);
-       }
+       io_sq_thread_drop_mm(ctx);
        revert_creds(old_cred);
 
        kthread_parkme();
@@ -7509,13 +7537,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        wake_up(&ctx->sqo_wait);
                submitted = to_submit;
        } else if (to_submit) {
-               struct mm_struct *cur_mm;
-
                mutex_lock(&ctx->uring_lock);
-               /* already have mm, so io_submit_sqes() won't try to grab it */
-               cur_mm = ctx->sqo_mm;
-               submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
-                                          &cur_mm, false);
+               submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
                mutex_unlock(&ctx->uring_lock);
 
                if (submitted != to_submit)
index f2dc35c22964c3da42da19c2ff954e7cdd7274f3..b8d78f3933651a2c042bf92ff3bebed471d6c69e 100644 (file)
@@ -2023,6 +2023,7 @@ lookup_again:
                        goto lookup_again;
                }
 
+               spin_unlock(&ino->i_lock);
                first = true;
                status = nfs4_select_rw_stateid(ctx->state,
                                        iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
@@ -2032,12 +2033,12 @@ lookup_again:
                        trace_pnfs_update_layout(ino, pos, count,
                                        iomode, lo, lseg,
                                        PNFS_UPDATE_LAYOUT_INVALID_OPEN);
-                       spin_unlock(&ino->i_lock);
                        nfs4_schedule_stateid_recovery(server, ctx->state);
                        pnfs_clear_first_layoutget(lo);
                        pnfs_put_layout_hdr(lo);
                        goto lookup_again;
                }
+               spin_lock(&ino->i_lock);
        } else {
                nfs4_stateid_copy(&stateid, &lo->plh_stateid);
        }
index 6042b646ab2797acaa50ad0023e932300a7a42af..572898dd16a00c4b38c5e02383c6505968a23d6a 100644 (file)
@@ -1573,6 +1573,7 @@ static ssize_t timens_offsets_write(struct file *file, const char __user *buf,
        noffsets = 0;
        for (pos = kbuf; pos; pos = next_line) {
                struct proc_timens_offset *off = &offsets[noffsets];
+               char clock[10];
                int err;
 
                /* Find the end of line and ensure we don't look past it */
@@ -1584,10 +1585,21 @@ static ssize_t timens_offsets_write(struct file *file, const char __user *buf,
                                next_line = NULL;
                }
 
-               err = sscanf(pos, "%u %lld %lu", &off->clockid,
+               err = sscanf(pos, "%9s %lld %lu", clock,
                                &off->val.tv_sec, &off->val.tv_nsec);
                if (err != 3 || off->val.tv_nsec >= NSEC_PER_SEC)
                        goto out;
+
+               clock[sizeof(clock) - 1] = 0;
+               if (strcmp(clock, "monotonic") == 0 ||
+                   strcmp(clock, __stringify(CLOCK_MONOTONIC)) == 0)
+                       off->clockid = CLOCK_MONOTONIC;
+               else if (strcmp(clock, "boottime") == 0 ||
+                        strcmp(clock, __stringify(CLOCK_BOOTTIME)) == 0)
+                       off->clockid = CLOCK_BOOTTIME;
+               else
+                       goto out;
+
                noffsets++;
                if (noffsets == ARRAY_SIZE(offsets)) {
                        if (next_line)
index 2633f10446c3befc0710775a78fa340cb7224889..cdbe9293ea55148d284d8990a563702a9f6f7a55 100644 (file)
@@ -196,6 +196,13 @@ static void proc_kill_sb(struct super_block *sb)
        if (ns->proc_thread_self)
                dput(ns->proc_thread_self);
        kill_anon_super(sb);
+
+       /* Make the pid namespace safe for the next mount of proc */
+       ns->proc_self = NULL;
+       ns->proc_thread_self = NULL;
+       ns->pid_gid = GLOBAL_ROOT_GID;
+       ns->hide_pid = 0;
+
        put_pid_ns(ns);
 }
 
index a7be7a9e5c1ae09ef9515ce04e6df550587dd32d..8bf1d15be3f6a047c0727668641016e64fa11360 100644 (file)
@@ -911,7 +911,12 @@ xfs_eofblocks_worker(
 {
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_eofblocks_work);
+
+       if (!sb_start_write_trylock(mp->m_super))
+               return;
        xfs_icache_free_eofblocks(mp, NULL);
+       sb_end_write(mp->m_super);
+
        xfs_queue_eofblocks(mp);
 }
 
@@ -938,7 +943,12 @@ xfs_cowblocks_worker(
 {
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_cowblocks_work);
+
+       if (!sb_start_write_trylock(mp->m_super))
+               return;
        xfs_icache_free_cowblocks(mp, NULL);
+       sb_end_write(mp->m_super);
+
        xfs_queue_cowblocks(mp);
 }
 
index cdfb3cd9a25bdc523bca574926d5d9da30b10180..309958186d338099cf52e7d9a8a3ce579c626a20 100644 (file)
@@ -2363,7 +2363,10 @@ xfs_file_ioctl(
                if (error)
                        return error;
 
-               return xfs_icache_free_eofblocks(mp, &keofb);
+               sb_start_write(mp->m_super);
+               error = xfs_icache_free_eofblocks(mp, &keofb);
+               sb_end_write(mp->m_super);
+               return error;
        }
 
        default:
index 50c43422fa170eb2e7c8b91dc5329b834750a2e0..b2e4598fdf7d3e17e1dba5ac4d48cb10f07ec05f 100644 (file)
@@ -167,8 +167,12 @@ typedef struct xfs_mount {
        struct xfs_kobj         m_error_meta_kobj;
        struct xfs_error_cfg    m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
        struct xstats           m_stats;        /* per-fs stats */
-       struct ratelimit_state  m_flush_inodes_ratelimit;
 
+       /*
+        * Workqueue item so that we can coalesce multiple inode flush attempts
+        * into a single flush.
+        */
+       struct work_struct      m_flush_inodes_work;
        struct workqueue_struct *m_buf_workqueue;
        struct workqueue_struct *m_unwritten_workqueue;
        struct workqueue_struct *m_cil_workqueue;
index b0ce04ffd3cd2ddeebc6db96dce35d61cd6413da..107bf2a2f3448a075d66f67fdeed880e5dc6c004 100644 (file)
@@ -1051,6 +1051,7 @@ xfs_reflink_remap_extent(
                uirec.br_startblock = irec->br_startblock + rlen;
                uirec.br_startoff = irec->br_startoff + rlen;
                uirec.br_blockcount = unmap_len - rlen;
+               uirec.br_state = irec->br_state;
                unmap_len = rlen;
 
                /* If this isn't a real mapping, we're done. */
index abf06bf9c3f35021e4fb871c3819ace8716b8849..424bb9a2d53256b2d65ef3f6291ec6bbd53b642c 100644 (file)
@@ -516,6 +516,20 @@ xfs_destroy_mount_workqueues(
        destroy_workqueue(mp->m_buf_workqueue);
 }
 
+static void
+xfs_flush_inodes_worker(
+       struct work_struct      *work)
+{
+       struct xfs_mount        *mp = container_of(work, struct xfs_mount,
+                                                  m_flush_inodes_work);
+       struct super_block      *sb = mp->m_super;
+
+       if (down_read_trylock(&sb->s_umount)) {
+               sync_inodes_sb(sb);
+               up_read(&sb->s_umount);
+       }
+}
+
 /*
  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
@@ -526,15 +540,15 @@ void
 xfs_flush_inodes(
        struct xfs_mount        *mp)
 {
-       struct super_block      *sb = mp->m_super;
-
-       if (!__ratelimit(&mp->m_flush_inodes_ratelimit))
+       /*
+        * If flush_work() returns true then that means we waited for a flush
+        * which was already in progress.  Don't bother running another scan.
+        */
+       if (flush_work(&mp->m_flush_inodes_work))
                return;
 
-       if (down_read_trylock(&sb->s_umount)) {
-               sync_inodes_sb(sb);
-               up_read(&sb->s_umount);
-       }
+       queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
+       flush_work(&mp->m_flush_inodes_work);
 }
 
 /* Catch misguided souls that try to use this interface on XFS */
@@ -1369,17 +1383,6 @@ xfs_fc_fill_super(
        if (error)
                goto out_free_names;
 
-       /*
-        * Cap the number of invocations of xfs_flush_inodes to 16 for every
-        * quarter of a second.  The magic numbers here were determined by
-        * observation neither to cause stalls in writeback when there are a
-        * lot of IO threads and the fs is near ENOSPC, nor cause any fstest
-        * regressions.  YMMV.
-        */
-       ratelimit_state_init(&mp->m_flush_inodes_ratelimit, HZ / 4, 16);
-       ratelimit_set_flags(&mp->m_flush_inodes_ratelimit,
-                       RATELIMIT_MSG_ON_RELEASE);
-
        error = xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;
@@ -1752,6 +1755,7 @@ static int xfs_init_fs_context(
        spin_lock_init(&mp->m_perag_lock);
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);
+       INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
        INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
        INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
        INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
index b3f1082cc435c84efccaa11ec4a0bfc0260568dd..1c4fd950f09123d6802b0dd82be00757eb815992 100644 (file)
@@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
        return nr_bank;
 }
 
-void hyperv_report_panic(struct pt_regs *regs, long err);
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
index 3ed5dee899fd16af8f3facdcc585e1e93ee47abd..eb259c2547af782bfecab9a862c809bb6a406580 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
 
 #ifndef _DRM_CLIENT_H_
 #define _DRM_CLIENT_H_
@@ -188,6 +188,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
        drm_for_each_connector_iter(connector, iter) \
                if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
 
-int drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_minor *minor);
 
 #endif
index 19ae6bb5c85be15d15b7589bf17591332bc0e653..fd543d1db9b2c5042dba43b8d6d6676098210d09 100644 (file)
@@ -1617,9 +1617,9 @@ struct drm_tile_group {
 };
 
 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
-                                                 char topology[8]);
+                                                 const char topology[8]);
 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
-                                              char topology[8]);
+                                              const char topology[8]);
 void drm_mode_put_tile_group(struct drm_device *dev,
                             struct drm_tile_group *tg);
 
index 7501e323d38370fa9410520b7a0618e092900eea..2188dc83957fd22beea320444a4ad1bda3285e4f 100644 (file)
@@ -80,18 +80,16 @@ struct drm_info_node {
 };
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_debugfs_create_files(const struct drm_info_list *files,
-                            int count, struct dentry *root,
-                            struct drm_minor *minor);
+void drm_debugfs_create_files(const struct drm_info_list *files,
+                             int count, struct dentry *root,
+                             struct drm_minor *minor);
 int drm_debugfs_remove_files(const struct drm_info_list *files,
                             int count, struct drm_minor *minor);
 #else
-static inline int drm_debugfs_create_files(const struct drm_info_list *files,
-                                          int count, struct dentry *root,
-                                          struct drm_minor *minor)
-{
-       return 0;
-}
+static inline void drm_debugfs_create_files(const struct drm_info_list *files,
+                                           int count, struct dentry *root,
+                                           struct drm_minor *minor)
+{}
 
 static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
                                           int count, struct drm_minor *minor)
index bb60a949f416ff1e962fddac4f73b45ee737eedb..a55874db9dd446d896476c3f0d429fc24ad6abe0 100644 (file)
@@ -67,15 +67,33 @@ struct drm_device {
        /** @dev: Device structure of bus-device */
        struct device *dev;
 
+       /**
+        * @managed:
+        *
+        * Managed resources linked to the lifetime of this &drm_device as
+        * tracked by @ref.
+        */
+       struct {
+               /** @managed.resources: managed resources list */
+               struct list_head resources;
+               /** @managed.final_kfree: pointer for final kfree() call */
+               void *final_kfree;
+               /** @managed.lock: protects @managed.resources */
+               spinlock_t lock;
+       } managed;
+
        /** @driver: DRM driver managing the device */
        struct drm_driver *driver;
 
        /**
         * @dev_private:
         *
-        * DRM driver private data. Instead of using this pointer it is
-        * recommended that drivers use drm_dev_init() and embed struct
-        * &drm_device in their larger per-device structure.
+        * DRM driver private data. This is deprecated and should be left set to
+        * NULL.
+        *
+        * Instead of using this pointer it is recommended that drivers use
+        * drm_dev_init() and embed struct &drm_device in their larger
+        * per-device structure.
         */
        void *dev_private;
 
index 9d3b745c3107763a84269eb5a48484a698bb4bdd..27bdd273fc4e7d340d72fea2cedf288f9437f054 100644 (file)
@@ -97,7 +97,7 @@ struct displayid_detailed_timing_block {
             (idx) + sizeof(struct displayid_block) <= (length) && \
             (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
             (block)->num_bytes > 0; \
-            (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
+            (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \
             (block) = (struct displayid_block *)&(displayid)[idx])
 
 #endif
index cd44509772cb28c84d081b03a60168f0e50a1a22..2035ac44afde0b516f8e42250958cc1d7bda78da 100644 (file)
 #define DP_DSC_PEAK_THROUGHPUT              0x06B
 # define DP_DSC_THROUGHPUT_MODE_0_MASK      (0xf << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_SHIFT     0
-# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0
 # define DP_DSC_THROUGHPUT_MODE_0_340       (1 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_400       (2 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_450       (3 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_170       (15 << 0) /* 1.4a */
 # define DP_DSC_THROUGHPUT_MODE_1_MASK      (0xf << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_SHIFT     4
-# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0
 # define DP_DSC_THROUGHPUT_MODE_1_340       (1 << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_400       (2 << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_450       (3 << 4)
index 3cde42b333c36c0d88b74fdbc63df438c7f3e2b9..9e1ffcd7cb6837d191d3110fc1b1c4b279f0fd87 100644 (file)
@@ -157,19 +157,45 @@ struct drm_dp_mst_port {
         */
        bool has_audio;
 
+       /**
+        * @fec_capable: bool indicating if FEC can be supported up to that
+        * point in the MST topology.
+        */
        bool fec_capable;
 };
 
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+       u8 lct;
+       u8 lcr;
+       u8 rad[8];
+       bool broadcast;
+       bool path_msg;
+       u8 msg_len;
+       bool somt;
+       bool eomt;
+       bool seqno;
+};
+
+struct drm_dp_sideband_msg_rx {
+       u8 chunk[48];
+       u8 msg[256];
+       u8 curchunk_len;
+       u8 curchunk_idx; /* chunk we are parsing now */
+       u8 curchunk_hdrlen;
+       u8 curlen; /* total length of the msg */
+       bool have_somt;
+       bool have_eomt;
+       struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
 /**
  * struct drm_dp_mst_branch - MST branch device.
  * @rad: Relative Address to talk to this branch device.
  * @lct: Link count total to talk to this branch device.
  * @num_ports: number of ports on the branch.
- * @msg_slots: one bit per transmitted msg slot.
  * @port_parent: pointer to the port parent, NULL if toplevel.
  * @mgr: topology manager for this branch device.
- * @tx_slots: transmission slots for this device.
- * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
  * @guid: guid for DP 1.2 branch device. port under this branch can be
  * identified by port #.
@@ -210,7 +236,6 @@ struct drm_dp_mst_branch {
        u8 lct;
        int num_ports;
 
-       int msg_slots;
        /**
         * @ports: the list of ports on this branch device. This should be
         * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
@@ -223,13 +248,9 @@ struct drm_dp_mst_branch {
         */
        struct list_head ports;
 
-       /* list of tx ops queue for this port */
        struct drm_dp_mst_port *port_parent;
        struct drm_dp_mst_topology_mgr *mgr;
 
-       /* slots are protected by mstb->mgr->qlock */
-       struct drm_dp_sideband_msg_tx *tx_slots[2];
-       int last_seqno;
        bool link_address_sent;
 
        /* global unique identifier to identify branch devices */
@@ -237,19 +258,6 @@ struct drm_dp_mst_branch {
 };
 
 
-/* sideband msg header - not bit struct */
-struct drm_dp_sideband_msg_hdr {
-       u8 lct;
-       u8 lcr;
-       u8 rad[8];
-       bool broadcast;
-       bool path_msg;
-       u8 msg_len;
-       bool somt;
-       bool eomt;
-       bool seqno;
-};
-
 struct drm_dp_nak_reply {
        u8 guid[16];
        u8 reason;
@@ -306,18 +314,6 @@ struct drm_dp_remote_i2c_write_ack_reply {
 };
 
 
-struct drm_dp_sideband_msg_rx {
-       u8 chunk[48];
-       u8 msg[256];
-       u8 curchunk_len;
-       u8 curchunk_idx; /* chunk we are parsing now */
-       u8 curchunk_hdrlen;
-       u8 curlen; /* total length of the msg */
-       bool have_somt;
-       bool have_eomt;
-       struct drm_dp_sideband_msg_hdr initial_hdr;
-};
-
 #define DRM_DP_MAX_SDP_STREAMS 16
 struct drm_dp_allocate_payload {
        u8 port_number;
@@ -479,8 +475,6 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
-       void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
-                                 struct drm_connector *connector);
 };
 
 #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
@@ -555,15 +549,17 @@ struct drm_dp_mst_topology_mgr {
         */
        int conn_base_id;
 
-       /**
-        * @down_rep_recv: Message receiver state for down replies.
-        */
-       struct drm_dp_sideband_msg_rx down_rep_recv;
        /**
         * @up_req_recv: Message receiver state for up requests.
         */
        struct drm_dp_sideband_msg_rx up_req_recv;
 
+       /**
+        * @down_rep_recv: Message receiver state for replies to down
+        * requests.
+        */
+       struct drm_dp_sideband_msg_rx down_rep_recv;
+
        /**
         * @lock: protects @mst_state, @mst_primary, @dpcd, and
         * @payload_id_table_cleared.
@@ -589,11 +585,6 @@ struct drm_dp_mst_topology_mgr {
         */
        bool payload_id_table_cleared : 1;
 
-       /**
-        * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
-        */
-       bool is_waiting_for_dwn_reply : 1;
-
        /**
         * @mst_primary: Pointer to the primary/first branch device.
         */
@@ -618,13 +609,12 @@ struct drm_dp_mst_topology_mgr {
        const struct drm_private_state_funcs *funcs;
 
        /**
-        * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and
-        * &drm_dp_sideband_msg_tx.state once they are queued
+        * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
         */
        struct mutex qlock;
 
        /**
-        * @tx_msg_downq: List of pending down replies.
+        * @tx_msg_downq: List of pending down requests
         */
        struct list_head tx_msg_downq;
 
@@ -734,8 +724,6 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
                       struct drm_dp_mst_topology_mgr *mgr,
                       struct drm_dp_mst_port *port);
 
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_dp_mst_port *port);
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
index 97109df5beac9a237d3864fb0e8e88693bbd682a..6d457652f199fa3f67225d87169ca59bb7525360 100644 (file)
@@ -262,9 +262,11 @@ struct drm_driver {
         * @release:
         *
         * Optional callback for destroying device data after the final
-        * reference is released, i.e. the device is being destroyed. Drivers
-        * using this callback are responsible for calling drm_dev_fini()
-        * to finalize the device and then freeing the struct themselves.
+        * reference is released, i.e. the device is being destroyed.
+        *
+        * This is deprecated, clean up all memory allocations associated with a
+        * &drm_device using drmm_add_action(), drmm_kmalloc() and related
+        * managed resources functions.
         */
        void (*release) (struct drm_device *);
 
@@ -323,7 +325,7 @@ struct drm_driver {
         *
         * Allows drivers to create driver-specific debugfs files.
         */
-       int (*debugfs_init)(struct drm_minor *minor);
+       void (*debugfs_init)(struct drm_minor *minor);
 
        /**
         * @gem_free_object: deconstructor for drm_gem_objects
@@ -620,7 +622,39 @@ int drm_dev_init(struct drm_device *dev,
 int devm_drm_dev_init(struct device *parent,
                      struct drm_device *dev,
                      struct drm_driver *driver);
-void drm_dev_fini(struct drm_device *dev);
+
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+                          size_t size, size_t offset);
+
+/**
+ * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
+ * @parent: Parent device object
+ * @driver: DRM driver
+ * @type: the type of the struct which contains struct &drm_device
+ * @member: the name of the &drm_device within @type.
+ *
+ * This allocates and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertice the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
+ *
+ * It is recommended that drivers embed &struct drm_device into their own device
+ * structure.
+ *
+ * Note that this manages the lifetime of the resulting &drm_device
+ * automatically using devres. The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+#define devm_drm_dev_alloc(parent, driver, type, member) \
+       ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
+                                      offsetof(type, member)))
 
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent);
index 4370e039c015a7501a073297c0e51e75761b278a..a60f5f1555acf9875015951ee371d6db7281690a 100644 (file)
@@ -142,7 +142,7 @@ struct drm_encoder {
         * the bits for all &drm_crtc objects this encoder can be connected to
         * before calling drm_dev_register().
         *
-        * In reality almost every driver gets this wrong.
+        * You will get a WARN if you get this wrong in the driver.
         *
         * Note that since CRTC objects can't be hotplugged the assigned indices
         * are stable and hence known before registering all objects.
@@ -159,7 +159,11 @@ struct drm_encoder {
         * encoders can be used in a cloned configuration, they both should have
         * each another bits set.
         *
-        * In reality almost every driver gets this wrong.
+        * As an exception to the above rule if the driver doesn't implement
+        * any cloning it can leave @possible_clones set to 0. The core will
+        * automagically fix this up by setting the bit for the encoder itself.
+        *
+        * You will get a WARN if you get this wrong in the driver.
         *
         * Note that since encoder objects can't be hotplugged the assigned indices
         * are stable and hence known before registering all objects.
index 208dbf87afa3ed7a7496798a45ffb365e6643be8..306aa3a60be96016e6b5d8744d77ec2498a45595 100644 (file)
@@ -269,7 +269,8 @@ int drm_fb_helper_debug_leave(struct fb_info *info);
 void drm_fb_helper_lastclose(struct drm_device *dev);
 void drm_fb_helper_output_poll_changed(struct drm_device *dev);
 
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
+void drm_fbdev_generic_setup(struct drm_device *dev,
+                            unsigned int preferred_bpp);
 #else
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
                                        struct drm_fb_helper *helper,
@@ -443,10 +444,9 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
 {
 }
 
-static inline int
+static inline void
 drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 {
-       return 0;
 }
 
 #endif
index 5aaf1c4593a970e24d688c4ddde966bea3a10b8d..716990bace1048139e0c316e07d97d30a71e53f2 100644 (file)
@@ -201,6 +201,17 @@ struct drm_file {
         */
        bool writeback_connectors;
 
+       /**
+        * @was_master:
+        *
+        * This client has or had, master capability. Protected by struct
+        * &drm_device.master_mutex.
+        *
+        * This is used to ensure that CAP_SYS_ADMIN is not enforced, if the
+        * client is or was master in the past.
+        */
+       bool was_master;
+
        /**
         * @is_master:
         *
index c0e0256e3e988f87df08b761db8c3ca1b2799e26..be658ebbec72b084c47d5f7ab8289a942d6f6632 100644 (file)
@@ -297,4 +297,42 @@ int drm_framebuffer_plane_width(int width,
 int drm_framebuffer_plane_height(int height,
                                 const struct drm_framebuffer *fb, int plane);
 
+/**
+ * struct drm_afbc_framebuffer - a special afbc frame buffer object
+ *
+ * A derived class of struct drm_framebuffer, dedicated for afbc use cases.
+ */
+struct drm_afbc_framebuffer {
+       /**
+        * @base: base framebuffer structure.
+        */
+       struct drm_framebuffer base;
+       /**
+        * @block_width: width of a single afbc block
+        */
+       u32 block_width;
+       /**
+        * @block_height: height of a single afbc block
+        */
+       u32 block_height;
+       /**
+        * @aligned_width: aligned frame buffer width
+        */
+       u32 aligned_width;
+       /**
+        * @aligned_height: aligned frame buffer height
+        */
+       u32 aligned_height;
+       /**
+        * @offset: offset of the first afbc header
+        */
+       u32 offset;
+       /**
+        * @afbc_size: minimum size of afbc buffer
+        */
+       u32 afbc_size;
+};
+
+#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base)
+
 #endif
index d9f13fd25b0aef962808e95fc96a3e3b0f04f53d..6b013154911dcd1e6a26d831bd5fc0e2f28c0b33 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __DRM_GEM_FB_HELPER_H__
 #define __DRM_GEM_FB_HELPER_H__
 
+struct drm_afbc_framebuffer;
 struct drm_device;
 struct drm_fb_helper_surface_size;
 struct drm_file;
@@ -12,12 +13,19 @@ struct drm_plane;
 struct drm_plane_state;
 struct drm_simple_display_pipe;
 
+#define AFBC_VENDOR_AND_TYPE_MASK      GENMASK_ULL(63, 52)
+
 struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
                                          unsigned int plane);
 void drm_gem_fb_destroy(struct drm_framebuffer *fb);
 int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
                             unsigned int *handle);
 
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+                              struct drm_framebuffer *fb,
+                              struct drm_file *file,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              const struct drm_framebuffer_funcs *funcs);
 struct drm_framebuffer *
 drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
                             const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -29,6 +37,13 @@ struct drm_framebuffer *
 drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
                             const struct drm_mode_fb_cmd2 *mode_cmd);
 
+#define drm_is_afbc(modifier) \
+       (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
+
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+                        const struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct drm_afbc_framebuffer *afbc_fb);
+
 int drm_gem_fb_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *state);
 int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
index 0f6e47213d8d65611da6732a97e8d8f5786dce11..b63bcd1b996da4d19cf25f38a88034e8cc962d85 100644 (file)
@@ -196,7 +196,7 @@ static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
        return container_of(bdev, struct drm_vram_mm, bdev);
 }
 
-int drm_vram_mm_debugfs_init(struct drm_minor *minor);
+void drm_vram_mm_debugfs_init(struct drm_minor *minor);
 
 /*
  * Helpers for integration with struct drm_device
index aed382c17b2695a7d617d87fcbbe89351130bab2..852d7451eeb12943d9be4f77126d8a043df8f5fe 100644 (file)
@@ -194,11 +194,26 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
 
 #ifdef CONFIG_PCI
 
+struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
+                                    size_t align);
+void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
+
 int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
 
 #else
 
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+                                                  size_t size, size_t align)
+{
+       return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+                               struct drm_dma_handle *dmah)
+{
+}
+
 static inline int drm_legacy_pci_init(struct drm_driver *driver,
                                      struct pci_driver *pdriver)
 {
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
new file mode 100644 (file)
index 0000000..ca41146
--- /dev/null
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _DRM_MANAGED_H_
+#define _DRM_MANAGED_H_
+
+#include <linux/gfp.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+
+struct drm_device;
+
+typedef void (*drmres_release_t)(struct drm_device *dev, void *res);
+
+/**
+ * drmm_add_action - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function adds the @release action with optional parameter @data to the
+ * list of cleanup actions for @dev. The cleanup actions will be run in reverse
+ * order in the final drm_dev_put() call for @dev.
+ */
+#define drmm_add_action(dev, action, data) \
+       __drmm_add_action(dev, action, data, #action)
+
+int __must_check __drmm_add_action(struct drm_device *dev,
+                                  drmres_release_t action,
+                                  void *data, const char *name);
+
+/**
+ * drmm_add_action_or_reset - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * Similar to drmm_add_action(), with the only difference that upon failure
+ * @action is directly called for any cleanup work necessary on failures.
+ */
+#define drmm_add_action_or_reset(dev, action, data) \
+       __drmm_add_action_or_reset(dev, action, data, #action)
+
+int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
+                                           drmres_release_t action,
+                                           void *data, const char *name);
+
+void drmm_add_final_kfree(struct drm_device *dev, void *container);
+
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
+
+/**
+ * drmm_kzalloc - &drm_device managed kzalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kzalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+       return drmm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+
+/**
+ * drmm_kmalloc_array - &drm_device managed kmalloc_array()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc_array(). The allocated
+ * memory is automatically freed on the final drm_dev_put() and works exactly
+ * like a memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kmalloc_array(struct drm_device *dev,
+                                      size_t n, size_t size, gfp_t flags)
+{
+       size_t bytes;
+
+       if (unlikely(check_mul_overflow(n, size, &bytes)))
+               return NULL;
+
+       return drmm_kmalloc(dev, bytes, flags);
+}
+
+/**
+ * drmm_kcalloc - &drm_device managed kcalloc()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kcalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kcalloc(struct drm_device *dev,
+                                size_t n, size_t size, gfp_t flags)
+{
+       return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
+
+void drmm_kfree(struct drm_device *dev, void *data);
+
+#endif
index 33f325f5af2b921f6fd9b6f693c78257ea6f3cac..4d0e49c0ed2cf2efa11a61a9026689b800236dea 100644 (file)
@@ -152,7 +152,6 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
 int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
                      const struct drm_simple_display_pipe_funcs *funcs,
                      const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_release(struct drm_device *drm);
 void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
                          struct drm_plane_state *old_state);
 void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
@@ -170,7 +169,8 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
 
 int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
 int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+                             size_t len);
 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
                      struct drm_rect *clip, bool swap);
 /**
@@ -187,12 +187,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
  */
 #define mipi_dbi_command(dbi, cmd, seq...) \
 ({ \
-       u8 d[] = { seq }; \
+       const u8 d[] = { seq }; \
        mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
 })
 
 #ifdef CONFIG_DEBUG_FS
-int mipi_dbi_debugfs_init(struct drm_minor *minor);
+void mipi_dbi_debugfs_init(struct drm_minor *minor);
 #else
 #define mipi_dbi_debugfs_init          NULL
 #endif
index ee8b0e80ca90b72d34e10dffc50b5562a52144ae..a01bc6fac83cef32f77ac849643b115f033929e8 100644 (file)
@@ -168,6 +168,7 @@ struct drm_mm_node {
        struct rb_node rb_hole_addr;
        u64 __subtree_last;
        u64 hole_size;
+       u64 subtree_max_hole;
        unsigned long flags;
 #define DRM_MM_NODE_ALLOCATED_BIT      0
 #define DRM_MM_NODE_SCANNED_BIT                1
index 3bcbe30339f044cd18d7bef994b8d98271ca9eca..6c3ef49b46b3ae7357dbfee50b73f4538bc62ed3 100644 (file)
@@ -929,7 +929,23 @@ struct drm_mode_config {
        const struct drm_mode_config_helper_funcs *helper_private;
 };
 
-void drm_mode_config_init(struct drm_device *dev);
+int __must_check drmm_mode_config_init(struct drm_device *dev);
+
+/**
+ * drm_mode_config_init - DRM mode_configuration structure initialization
+ * @dev: DRM device
+ *
+ * This is the unmanaged version of drmm_mode_config_init() for drivers which
+ * still explicitly call drm_mode_config_cleanup().
+ *
+ * FIXME: This function is deprecated and drivers should be converted over to
+ * drmm_mode_config_init().
+ */
+static inline int drm_mode_config_init(struct drm_device *dev)
+{
+       return drmm_mode_config_init(dev);
+}
+
 void drm_mode_config_reset(struct drm_device *dev);
 void drm_mode_config_cleanup(struct drm_device *dev);
 
index 99134d4f35eb7cc988e975992c1a2bd34ff6949d..730fc31de4fbfedbdfd8f0de7c5b65ab0ce690cf 100644 (file)
@@ -390,16 +390,6 @@ struct drm_display_mode {
         */
        int vrefresh;
 
-       /**
-        * @hsync:
-        *
-        * Horizontal refresh rate, for debug output in human readable form. Not
-        * used in a functional way.
-        *
-        * This value is in kHz.
-        */
-       int hsync;
-
        /**
         * @picture_aspect_ratio:
         *
@@ -493,7 +483,6 @@ int of_get_drm_display_mode(struct device_node *np,
                            int index);
 
 void drm_mode_set_name(struct drm_display_mode *mode);
-int drm_mode_hsync(const struct drm_display_mode *mode);
 int drm_mode_vrefresh(const struct drm_display_mode *mode);
 void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
                            int *hdisplay, int *vdisplay);
index 7c20b1c8b6a7a0b313cb4028f68a2372ece91bf5..421a30f084631749f092061f5c424bebe2c7f0ac 100644 (file)
@@ -1075,8 +1075,35 @@ struct drm_connector_helper_funcs {
        void (*atomic_commit)(struct drm_connector *connector,
                              struct drm_connector_state *state);
 
+       /**
+        * @prepare_writeback_job:
+        *
+        * As writeback jobs contain a framebuffer, drivers may need to
+        * prepare and clean them up the same way they can prepare and
+        * clean up framebuffers for planes. This optional connector operation
+        * is used to support the preparation of writeback jobs. The job
+        * prepare operation is called from drm_atomic_helper_prepare_planes()
+        * for struct &drm_writeback_connector connectors only.
+        *
+        * This operation is optional.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
        int (*prepare_writeback_job)(struct drm_writeback_connector *connector,
                                     struct drm_writeback_job *job);
+       /**
+        * @cleanup_writeback_job:
+        *
+        * This optional connector operation is used to support the
+        * cleanup of writeback jobs. The job cleanup operation is called
+        * from the existing drm_writeback_cleanup_job() function, invoked
+        * both when destroying the job as part of an aborted commit, or when
+        * the job completes.
+        *
+        * This operation is optional.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
        void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
                                      struct drm_writeback_job *job);
 };
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
deleted file mode 100644 (file)
index 3941b02..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_PCI_H_
-#define _DRM_PCI_H_
-
-#include <linux/pci.h>
-
-struct drm_dma_handle;
-struct drm_device;
-struct drm_driver;
-struct drm_master;
-
-#ifdef CONFIG_PCI
-
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
-                                    size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
-                                                  size_t size, size_t align)
-{
-       return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
-                               struct drm_dma_handle *dmah)
-{
-}
-
-#endif
-
-#endif /* _DRM_PCI_H_ */
index ca7cee8e728a5e7c94c8809c7b0bafc4ecb7959c..1c9417430d08520f8c5c9a593ee874f7d5aebdfc 100644 (file)
@@ -313,6 +313,10 @@ enum drm_debug_category {
         * @DRM_UT_DP: Used in the DP code.
         */
        DRM_UT_DP               = 0x100,
+       /**
+        * @DRM_UT_DRMRES: Used in the drm managed resources code.
+        */
+       DRM_UT_DRMRES           = 0x200,
 };
 
 static inline bool drm_debug_enabled(enum drm_debug_category category)
@@ -442,6 +446,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
        drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
 #define drm_dbg_dp(drm, fmt, ...)                                      \
        drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+#define drm_dbg_drmres(drm, fmt, ...)                                  \
+       drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
 
 
 /*
index 777c14c847f03f85f5a0aa57864ef7037c5a26c3..9697d2714d2abf420eb22525afbcdbd991b73daf 100644 (file)
 #include <drm/drm_encoder.h>
 #include <linux/workqueue.h>
 
+/**
+ * struct drm_writeback_connector - DRM writeback connector
+ */
 struct drm_writeback_connector {
+       /**
+        * @base: base drm_connector object
+        */
        struct drm_connector base;
 
        /**
@@ -78,6 +84,9 @@ struct drm_writeback_connector {
        char timeline_name[32];
 };
 
+/**
+ * struct drm_writeback_job - DRM writeback job
+ */
 struct drm_writeback_job {
        /**
         * @connector:
index 26b04ff62676612aee4a507255710db4177853ba..a21b3b92135a6ee4898a20fed3464967c463172f 100644 (file)
@@ -56,6 +56,7 @@ enum drm_sched_priority {
  *              Jobs from this entity can be scheduled on any scheduler
  *              on this list.
  * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
+ * @priority: priority of the entity
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
index c9e0fd09f4b25cbca01c8b7fbca457ff8db857e0..54a527aa79ccddae36b731b0f015aa203ad81b39 100644 (file)
@@ -390,7 +390,6 @@ struct ttm_bo_driver {
 /**
  * struct ttm_bo_global - Buffer object driver global data.
  *
- * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
  * @shrink: A shrink callback object used for buffer object swap.
diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h
deleted file mode 100644 (file)
index b5e460f..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2017 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Tom St Denis <tom.stdenis@amd.com>
- */
-extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt);
-extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt);
index c1c0f9ea4e63a9afa1403adb2dd2a9760feebd68..a0ee494a6329a8cc48760bceb81ea6ba9f56585e 100644 (file)
@@ -319,7 +319,7 @@ struct bio_integrity_payload {
        struct work_struct      bip_work;       /* I/O completion */
 
        struct bio_vec          *bip_vec;
-       struct bio_vec          bip_inline_vecs[0];/* embedded bvec array */
+       struct bio_vec          bip_inline_vecs[];/* embedded bvec array */
 };
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
index f389d7c724bd2b25a8a6477060079282da7ff88e..b45148ba3291208fe83bcdd60d9c784ba725d4be 100644 (file)
@@ -173,7 +173,7 @@ struct blk_mq_hw_ctx {
         * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
         * blk_mq_hw_ctx_size().
         */
-       struct srcu_struct      srcu[0];
+       struct srcu_struct      srcu[];
 };
 
 /**
index 70254ae117690c40fa40692bbf790c13cfcdacdf..31eb92876be7ccf2d48f6c453f312b371bcdda50 100644 (file)
@@ -198,7 +198,7 @@ struct bio {
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
-       struct bio_vec          bi_inline_vecs[0];
+       struct bio_vec          bi_inline_vecs[];
 };
 
 #define BIO_RESET_BYTES                offsetof(struct bio, bi_max_vecs)
index e0b020eaf32e252ba7536958f5035747fe358d0a..15b765a181b8504f10ba7b827f9cbe3ddbbfae28 100644 (file)
@@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
 void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
+void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
+                 gfp_t gfp);
 struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
@@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block)
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
 }
 
+static inline void
+sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+{
+       __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+}
+
 static inline struct buffer_head *
 sb_getblk(struct super_block *sb, sector_t block)
 {
index 511a37302fea6ff66f008586056a6768c0ef3e13..5fd627e9da19859f28c3de93446c3c4e367c269c 100644 (file)
@@ -189,7 +189,7 @@ struct __packed pucan_rx_msg {
        u8      client;
        __le16  flags;
        __le32  can_id;
-       u8      d[0];
+       u8      d[];
 };
 
 /* uCAN error types */
@@ -266,7 +266,7 @@ struct __packed pucan_tx_msg {
        u8      client;
        __le16  flags;
        __le32  can_id;
-       u8      d[0];
+       u8      d[];
 };
 
 /* build the cmd opcode_channel field with respect to the correct endianness */
index 02edeafcb2bf84af2354a9882b82a7ef6e57fa0f..be8aea04d0234d7578b78ca15b9a2d14815cd2bb 100644 (file)
@@ -28,7 +28,7 @@ struct cpu_rmap {
        struct {
                u16     index;
                u16     dist;
-       }               near[0];
+       }               near[];
 };
 #define CPU_RMAP_DIST_INF 0xffff
 
index 594fc66a395a7b6a42db741f43726e219cc13362..2ace69e4108850674030db3cf785f117d3391110 100644 (file)
@@ -29,7 +29,7 @@ struct pubkey_hdr {
        uint32_t        timestamp;      /* key made, always 0 for now */
        uint8_t         algo;
        uint8_t         nmpi;
-       char            mpi[0];
+       char            mpi[];
 } __packed;
 
 struct signature_hdr {
@@ -39,7 +39,7 @@ struct signature_hdr {
        uint8_t         hash;
        uint8_t         keyid[8];
        uint8_t         nmpi;
-       char            mpi[0];
+       char            mpi[];
 } __packed;
 
 #if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
index fc61f3cff72f7b79f1ed0fe4a51b4e973e705901..99002220cd452646b924f76f3c37909d804a67d4 100644 (file)
@@ -7,7 +7,7 @@ struct linux_dirent64 {
        s64             d_off;
        unsigned short  d_reclen;
        unsigned char   d_type;
-       char            d_name[0];
+       char            d_name[];
 };
 
 #endif
index 1ade486fc2bbcf99580c82c531ece8622b2077aa..82e0a4a64601f9864f9746dd4bef5ebdb10f4b2e 100644 (file)
@@ -334,6 +334,14 @@ struct dma_buf {
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
+       /**
+        * @allow_peer2peer:
+        *
+        * If this is set to true the importer must be able to handle peer
+        * resources without struct pages.
+        */
+       bool allow_peer2peer;
+
        /**
         * @move_notify
         *
@@ -362,6 +370,7 @@ struct dma_buf_attach_ops {
  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
  * @sgt: cached mapping.
  * @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
  * @importer_ops: importer operations for this attachment, if provided
  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
@@ -382,6 +391,7 @@ struct dma_buf_attachment {
        struct list_head node;
        struct sg_table *sgt;
        enum dma_data_direction dir;
+       bool peer2peer;
        const struct dma_buf_attach_ops *importer_ops;
        void *importer_priv;
        void *priv;
index 564e96f625ff37198d2779720426f509536ee320..1c630e2c2756c5e5c6ce6c391c010333621f475e 100644 (file)
@@ -101,7 +101,7 @@ struct enclosure_device {
        struct device edev;
        struct enclosure_component_callbacks *cb;
        int components;
-       struct enclosure_component component[0];
+       struct enclosure_component component[];
 };
 
 static inline struct enclosure_device *
index d249b88a4d5a731e1bd2dd42e019a123909b9d48..ade6486a3382486752868509838da30da7134f68 100644 (file)
@@ -36,7 +36,7 @@ struct em_cap_state {
 struct em_perf_domain {
        struct em_cap_state *table;
        int nr_cap_states;
-       unsigned long cpus[0];
+       unsigned long cpus[];
 };
 
 #ifdef CONFIG_ENERGY_MODEL
index c1d379bf6ee1542eb420c83dc672a4c8f8ae3e77..a23b26eab479007408a90a2ebe4dfd823d8078a2 100644 (file)
@@ -35,7 +35,7 @@ struct compat_ethtool_rxnfc {
        compat_u64                      data;
        struct compat_ethtool_rx_flow_spec fs;
        u32                             rule_cnt;
-       u32                             rule_locs[0];
+       u32                             rule_locs[];
 };
 
 #endif /* CONFIG_COMPAT */
@@ -462,7 +462,7 @@ int ethtool_check_ops(const struct ethtool_ops *ops);
 
 struct ethtool_rx_flow_rule {
        struct flow_rule        *rule;
-       unsigned long           priv[0];
+       unsigned long           priv[];
 };
 
 struct ethtool_rx_flow_spec_input {
index 5b14a0f381241de51ac575d483847af8fb8b5b3b..0bd581003cd5dfa99dacce00f6b1fd554687f9cd 100644 (file)
@@ -76,7 +76,7 @@ struct gen_pool_chunk {
        void *owner;                    /* private data to retrieve at alloc time */
        unsigned long start_addr;       /* start address of memory chunk */
        unsigned long end_addr;         /* end address of memory chunk (inclusive) */
-       unsigned long bits[0];          /* bitmap for allocating memory chunk */
+       unsigned long bits[];           /* bitmap for allocating memory chunk */
 };
 
 /*
index 456fc17ecb1c3823731ca5768d3ba71ed70308a8..45d36ba4826bd8736b12375b3262f1f7980be50b 100644 (file)
@@ -461,12 +461,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap,
                       unsigned short const *addr_list,
                       int (*probe)(struct i2c_adapter *adap, unsigned short addr));
 
-struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
-                      struct i2c_board_info *info,
-                      unsigned short const *addr_list,
-                      int (*probe)(struct i2c_adapter *adap, unsigned short addr));
-
 /* Common custom probe functions */
 int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
 
index 463047d0190bc4baa470fd7eb9217ad3ffa97ed3..faa6586a57838554cf02402d6659691f17bd458f 100644 (file)
@@ -38,7 +38,7 @@ struct ip_sf_socklist {
        unsigned int            sl_max;
        unsigned int            sl_count;
        struct rcu_head         rcu;
-       __be32                  sl_addr[0];
+       __be32                  sl_addr[];
 };
 
 #define IP_SFLSIZE(count)      (sizeof(struct ip_sf_socklist) + \
index 98cb5ce0b0a0839a56c9dfd0ee222d7ec0d8aff7..b824877e6d1b6c48ab221ee14c894d3aaef78686 100644 (file)
@@ -18,7 +18,7 @@
 struct ihex_binrec {
        __be32 addr;
        __be16 len;
-       uint8_t data[0];
+       uint8_t data[];
 } __attribute__((packed));
 
 static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
index 9315fbb87db37ff67413cf4c7ff0bc03f8c27fc4..8d5bc2c237d74a7f937a5b05072384117273a239 100644 (file)
@@ -573,8 +573,6 @@ enum {
 #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
 
 struct irqaction;
-extern int setup_irq(unsigned int irq, struct irqaction *new);
-extern void remove_irq(unsigned int irq, struct irqaction *act);
 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
 extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
@@ -1043,7 +1041,7 @@ struct irq_chip_generic {
        unsigned long           unused;
        struct irq_domain       *domain;
        struct list_head        list;
-       struct irq_chip_type    chip_types[0];
+       struct irq_chip_type    chip_types[];
 };
 
 /**
@@ -1079,7 +1077,7 @@ struct irq_domain_chip_generic {
        unsigned int            irq_flags_to_clear;
        unsigned int            irq_flags_to_set;
        enum irq_gc_flags       gc_flags;
-       struct irq_chip_generic *gc[0];
+       struct irq_chip_generic *gc[];
 };
 
 /* Generic chip callback functions */
index 765d9b769b69504c0033197342d2e9c79f8316b7..6c36b6cc3edf6f46f9050fb3a90e13c8f4f32b25 100644 (file)
 
 #define GICR_TYPER_PLPIS               (1U << 0)
 #define GICR_TYPER_VLPIS               (1U << 1)
+#define GICR_TYPER_DIRTY               (1U << 2)
 #define GICR_TYPER_DirectLPIS          (1U << 3)
 #define GICR_TYPER_LAST                        (1U << 4)
 #define GICR_TYPER_RVPEID              (1U << 7)
@@ -686,6 +687,7 @@ struct rdists {
        bool                    has_vlpis;
        bool                    has_rvpeid;
        bool                    has_direct_lpi;
+       bool                    has_vpend_valid_dirty;
 };
 
 struct irq_domain;
index d5ceb2839a2ded8ccf49524d76a5eedd666b1cd1..9dcaa3e582c90915a1af58a58ef099d88323a5a2 100644 (file)
@@ -34,7 +34,7 @@ struct list_lru_one {
 struct list_lru_memcg {
        struct rcu_head         rcu;
        /* array of per cgroup lists, indexed by memcg_cache_id */
-       struct list_lru_one     *lru[0];
+       struct list_lru_one     *lru[];
 };
 
 struct list_lru_node {
index 1b4150ff64be53ce4bb7a514968dd3bf1e3c7e93..d275c72c4f8efd09b9b3ccf8a8dffc6344d1a642 100644 (file)
@@ -106,7 +106,7 @@ struct lruvec_stat {
  */
 struct memcg_shrinker_map {
        struct rcu_head rcu;
-       unsigned long map[0];
+       unsigned long map[];
 };
 
 /*
@@ -148,7 +148,7 @@ struct mem_cgroup_threshold_ary {
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
-       struct mem_cgroup_threshold entries[0];
+       struct mem_cgroup_threshold entries[];
 };
 
 struct mem_cgroup_thresholds {
index 25f46a939637f2513c6bd6537b417bb8dffc51a0..3e268e636b5bcd005e839c6b213dc44f57a0d743 100644 (file)
@@ -83,7 +83,7 @@ struct wilco_ec_response {
        u16 result;
        u16 data_size;
        u8 reserved[2];
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 /**
index 540595a321a762d305bea8bb1ffa6586a6df0585..90797f1b421d8fec87325268aeae3181fb933978 100644 (file)
@@ -28,7 +28,7 @@ struct posix_acl {
        refcount_t              a_refcount;
        struct rcu_head         a_rcu;
        unsigned int            a_count;
-       struct posix_acl_entry  a_entries[0];
+       struct posix_acl_entry  a_entries[];
 };
 
 #define FOREACH_ACL_ENTRY(pa, acl, pe) \
index 317bace5ac641db4ac34df344b835c6aca3b43f8..2cd637268b4f9254a5751cefa3d84726457f6df8 100644 (file)
@@ -100,7 +100,7 @@ struct rio_switch {
        u32 port_ok;
        struct rio_switch_ops *ops;
        spinlock_t lock;
-       struct rio_dev *nextdev[0];
+       struct rio_dev *nextdev[];
 };
 
 /**
@@ -201,7 +201,7 @@ struct rio_dev {
        u8 hopcount;
        struct rio_dev *prev;
        atomic_t state;
-       struct rio_switch rswitch[0];   /* RIO switch info */
+       struct rio_switch rswitch[];    /* RIO switch info */
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
index 5974cedd008c345a3261229e30ca7e290d388d76..238bb85243d3653fee5170c58ea2112da6289154 100644 (file)
@@ -54,7 +54,7 @@ struct rs_codec {
  */
 struct rs_control {
        struct rs_codec *codec;
-       uint16_t        buffers[0];
+       uint16_t        buffers[];
 };
 
 /* General purpose RS codec, 8-bit data width, symbol width 1-15 bit  */
index af9319e4cfb967c98a3a151bbe5829675308fea5..95253ad792b0c85ab020b026507b8283a08a09fb 100644 (file)
@@ -142,7 +142,7 @@ struct sched_domain {
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
-       unsigned long span[0];
+       unsigned long span[];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
index 3a2ac7072dbba12c396e17c106b821f04747c219..3000c526f5526c06edadc598bead4cc5fd954f79 100644 (file)
@@ -4162,7 +4162,7 @@ struct skb_ext {
        refcount_t refcnt;
        u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
        u8 chunks;              /* same */
-       char data[0] __aligned(8);
+       char data[] __aligned(8);
 };
 
 struct skb_ext *__skb_ext_alloc(void);
index b835d8dbea0e3d95ef552ba2540939ced3358cb5..e1bbf7a16b2767ab08d53f9b297d5c1c87cc992f 100644 (file)
@@ -275,7 +275,7 @@ struct swap_info_struct {
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
-       struct plist_node avail_lists[0]; /*
+       struct plist_node avail_lists[]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last as the number of the
index eb6cbdf10e5087e942ba678791469a4087e4228e..44a7f9169ac67a0b81da41792fa7a588deb4f784 100644 (file)
@@ -295,7 +295,7 @@ struct bts_header {
        u32 magic;
        u32 version;
        u8 future[24];
-       u8 actions[0];
+       u8 actions[];
 } __attribute__ ((packed));
 
 /**
@@ -305,7 +305,7 @@ struct bts_header {
 struct bts_action {
        u16 type;
        u16 size;
-       u8 data[0];
+       u8 data[];
 } __attribute__ ((packed));
 
 struct bts_action_send {
@@ -315,7 +315,7 @@ struct bts_action_send {
 struct bts_action_wait {
        u32 msec;
        u32 size;
-       u8 data[0];
+       u8 data[];
 } __attribute__ ((packed));
 
 struct bts_action_delay {
index 131ea1bad458bee37bb418d05c1504f90c2abc16..c253461b1c4e662fd5754d4c23d015acec667ad9 100644 (file)
@@ -28,7 +28,7 @@ struct tcpa_event {
        u32 event_type;
        u8 pcr_value[20];       /* SHA1 */
        u32 event_size;
-       u8 event_data[0];
+       u8 event_data[];
 };
 
 enum tcpa_event_types {
@@ -55,7 +55,7 @@ enum tcpa_event_types {
 struct tcpa_pc_event {
        u32 event_id;
        u32 event_size;
-       u8 event_data[0];
+       u8 event_data[];
 };
 
 enum tcpa_pc_event_ids {
@@ -102,7 +102,7 @@ struct tcg_pcr_event {
 
 struct tcg_event_field {
        u32 event_size;
-       u8 event[0];
+       u8 event[];
 } __packed;
 
 struct tcg_pcr_event2_head {
index 4cf6e11f4a3cb5537f3c1650c234c47457a5a782..47eaa34f87619c11b150590f6bbb1f252d806eea 100644 (file)
@@ -73,7 +73,7 @@ struct simple_xattr {
        struct list_head list;
        char *name;
        size_t size;
-       char value[0];
+       char value[];
 };
 
 /*
index c78bd4ff9e3378d4158e389ce3fb4c49acb296b2..70e48f66dac8ef5d3f608de65653af2f86728ede 100644 (file)
@@ -905,6 +905,8 @@ struct survey_info {
  *     protocol frames.
  * @control_port_over_nl80211: TRUE if userspace expects to exchange control
  *     port frames over NL80211 instead of the network interface.
+ * @control_port_no_preauth: disables pre-auth rx over the nl80211 control
+ *     port for mac80211
  * @wep_keys: static WEP keys, if not NULL points to an array of
  *     CFG80211_MAX_WEP_KEYS WEP keys
  * @wep_tx_key: key index (0..3) of the default TX static WEP key
@@ -1222,6 +1224,7 @@ struct sta_txpwr {
  * @he_capa: HE capabilities of station
  * @he_capa_len: the length of the HE capabilities
  * @airtime_weight: airtime scheduler weight for this station
+ * @txpwr: transmit power for an associated station
  */
 struct station_parameters {
        const u8 *supported_rates;
@@ -4666,6 +4669,9 @@ struct wiphy_iftype_akm_suites {
  * @txq_memory_limit: configuration internal TX queue memory limit
  * @txq_quantum: configuration of internal TX queue scheduler quantum
  *
+ * @tx_queue_len: allow setting transmit queue len for drivers not using
+ *     wake_tx_queue
+ *
  * @support_mbssid: can HW support association with nontransmitted AP
  * @support_only_he_mbssid: don't parse MBSSID elements if it is not
  *     HE AP, in order to avoid compatibility issues.
@@ -4681,6 +4687,10 @@ struct wiphy_iftype_akm_suites {
  *     supported by the driver for each peer
  * @tid_config_support.max_retry: maximum supported retry count for
  *     long/short retry configuration
+ *
+ * @max_data_retry_count: maximum supported per TID retry count for
+ *     configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
+ *     %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
index f7543c095b33d854bd70451698c44fcf9a69be98..9947eb1e9eb67ebacd567616c29c767df407524b 100644 (file)
@@ -254,6 +254,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
 
        return rt->rt6i_flags & RTF_ANYCAST ||
                (rt->rt6i_dst.plen < 127 &&
+                !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
                 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
 }
 
index 6eb627b3c99b6f8cbd10a2af131489a26e83e579..4ff7c81e6717b6f12eeca46d6e0d7df110958dfe 100644 (file)
@@ -901,7 +901,7 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
 {
        struct nft_expr *expr;
 
-       if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
+       if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
                expr = nft_set_ext_expr(ext);
                expr->ops->eval(expr, regs, pkt);
        }
index 6d84784d33fa007d91836dc9c51532a23048cf08..3e8c6d4b4b59ff00a0cb8fbcbb256f55b5ac895a 100644 (file)
@@ -2553,9 +2553,9 @@ sk_is_refcounted(struct sock *sk)
 }
 
 /**
- * skb_steal_sock
- * @skb to steal the socket from
- * @refcounted is set to true if the socket is reference-counted
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
  */
 static inline struct sock *
 skb_steal_sock(struct sk_buff *skb, bool *refcounted)
index ebffcb36a7e342a437d4641aa00e5e15673c1564..6d6a3947c8b76c4cc08149d1cee9f99619af277c 100644 (file)
@@ -476,6 +476,8 @@ struct ocelot_port {
 
        void __iomem                    *regs;
 
+       bool                            vlan_aware;
+
        /* Ingress default VLAN (pvid) */
        u16                             pvid;
 
@@ -610,7 +612,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
 int ocelot_fdb_dump(struct ocelot *ocelot, int port,
                    dsa_fdb_dump_cb_t *cb, void *data);
 int ocelot_fdb_add(struct ocelot *ocelot, int port,
-                  const unsigned char *addr, u16 vid, bool vlan_aware);
+                  const unsigned char *addr, u16 vid);
 int ocelot_fdb_del(struct ocelot *ocelot, int port,
                   const unsigned char *addr, u16 vid);
 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
index 3ee8036f5436d31912bb60b8ed68821b468ad338..225154a4f2ed01f262d81c8a70d762d4bddf1458 100644 (file)
@@ -494,6 +494,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec);
 static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
 #endif
 
+static inline bool hda_codec_need_resume(struct hda_codec *codec)
+{
+       return !codec->relaxed_resume && codec->jacktbl.used;
+}
+
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
 /*
  * patch firmware
index 37342a13c9cb9372f849f2e3d3d5c75bf7c7eb6b..784814160197bb7f4daf94d1508d31393f7f7a91 100644 (file)
@@ -46,7 +46,7 @@ TRACE_EVENT(wbt_stat,
        ),
 
        TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
-                 "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
+                 "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu",
                  __entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
                  __entry->rnr_samples, __entry->wmean, __entry->wmin,
                  __entry->wmax, __entry->wnr_samples)
@@ -73,7 +73,7 @@ TRACE_EVENT(wbt_lat,
                __entry->lat = div_u64(lat, 1000);
        ),
 
-       TP_printk("%s: latency %lluus\n", __entry->name,
+       TP_printk("%s: latency %lluus", __entry->name,
                        (unsigned long long) __entry->lat)
 );
 
@@ -115,7 +115,7 @@ TRACE_EVENT(wbt_step,
                __entry->max    = max;
        ),
 
-       TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n",
+       TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u",
                  __entry->name, __entry->msg, __entry->step, __entry->window,
                  __entry->bg, __entry->normal, __entry->max)
 );
@@ -148,7 +148,7 @@ TRACE_EVENT(wbt_timer,
                __entry->inflight       = inflight;
        ),
 
-       TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name,
+       TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name,
                  __entry->status, __entry->step, __entry->inflight)
 );
 
index 65f69723cbdc1cce661115352b098de49381783c..4e873dcbe68f1338021c7e115a88b6fbb8c3f590 100644 (file)
@@ -133,6 +133,11 @@ extern "C" {
  * releasing the memory
  */
 #define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
+/* Flag that BO will be encrypted and that the TMZ bit should be
+ * set in the PTEs when mapping this buffer via GPUVM or
+ * accessing it with various hw blocks
+ */
+#define AMDGPU_GEM_CREATE_ENCRYPTED            (1 << 10)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
@@ -346,6 +351,10 @@ struct drm_amdgpu_gem_userptr {
 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK               0x3FFF
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT                43
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK         0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT       44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK                0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT                    63
+#define AMDGPU_TILING_SCANOUT_MASK                     0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
@@ -555,7 +564,7 @@ struct drm_amdgpu_cs_in {
        /**  Handle of resource list associated with CS */
        __u32           bo_list_handle;
        __u32           num_chunks;
-       __u32           _pad;
+       __u32           flags;
        /** this points to __u64 * which point to cs chunks */
        __u64           chunks;
 };
@@ -589,6 +598,14 @@ union drm_amdgpu_cs {
  */
 #define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
 
+/* Flag the IB as secure (TMZ)
+ */
+#define AMDGPU_IB_FLAGS_SECURE  (1 << 5)
+
+/* Tell KMD to flush and invalidate caches
+ */
+#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC  (1 << 6)
+
 struct drm_amdgpu_cs_chunk_ib {
        __u32 _pad;
        /** AMDGPU_IB_FLAG_* */
index 8bc0b31597d80737a804e96fb81b46b5308afc27..9e488d10f8b401f67b331b6ec5f47ecc9c256eff 100644 (file)
@@ -354,9 +354,12 @@ extern "C" {
  * a platform-dependent stride. On top of that the memory can apply
  * platform-depending swizzling of some higher address bits into bit6.
  *
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms this is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
  */
 #define I915_FORMAT_MOD_X_TILED        fourcc_mod_code(INTEL, 1)
 
@@ -369,9 +372,12 @@ extern "C" {
  * memory can apply platform-depending swizzling of some higher address bits
  * into bit6.
  *
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms this is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
  */
 #define I915_FORMAT_MOD_Y_TILED        fourcc_mod_code(INTEL, 2)
 
index 8134924cfc1746afe0db2b3f465874a5d5532501..e6b6cb0f8bc6a9e56ccd27c14f89202ae5cd3e5d 100644 (file)
@@ -36,12 +36,10 @@ struct btrfs_ioctl_vol_args {
 #define BTRFS_DEVICE_PATH_NAME_MAX     1024
 #define BTRFS_SUBVOL_NAME_MAX          4039
 
-/*
- * Deprecated since 5.7:
- *
- * BTRFS_SUBVOL_CREATE_ASYNC   (1ULL << 0)
- */
-
+#ifndef __KERNEL__
+/* Deprecated since 5.7 */
+# define BTRFS_SUBVOL_CREATE_ASYNC     (1ULL << 0)
+#endif
 #define BTRFS_SUBVOL_RDONLY            (1ULL << 1)
 #define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
 
index f880d28311605cb941bbc53d94ff4921f88e35df..e83954c69fff7fd256a0838a9ba493a6c37d44c3 100644 (file)
@@ -45,13 +45,13 @@ struct dlm_lock_params {
        void __user *bastaddr;
        struct dlm_lksb __user *lksb;
        char lvb[DLM_USER_LVB_LEN];
-       char name[0];
+       char name[];
 };
 
 struct dlm_lspace_params {
        __u32 flags;
        __u32 minor;
-       char name[0];
+       char name[];
 };
 
 struct dlm_purge_params {
index 8c0bc24d5d95555754e6bfe56e0ba5543faa460d..7a900b2377b603df04f61461888566aa125fc061 100644 (file)
@@ -34,7 +34,7 @@ struct fiemap {
        __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
        __u32 fm_extent_count;  /* size of fm_extents array (in) */
        __u32 fm_reserved;
-       struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
+       struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
 };
 
 #define FIEMAP_MAX_OFFSET      (~0ULL)
index 20917c59f39c9a2a27bf1b62e907d5e2a31f88a6..b6be62356d343f64b4576b55aa8a737dfd173397 100644 (file)
@@ -251,7 +251,7 @@ struct kfd_memory_exception_failure {
        __u32 imprecise;        /* Can't determine the  exact fault address */
 };
 
-/* memory exception data*/
+/* memory exception data */
 struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
@@ -410,6 +410,20 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u32 n_success;                /* to/from KFD */
 };
 
+/* Allocate GWS for specific queue
+ *
+ * @queue_id:    queue's id that GWS is allocated for
+ * @num_gws:     how many GWS to allocate
+ * @first_gws:   index of the first GWS allocated.
+ *               only support contiguous GWS allocation
+ */
+struct kfd_ioctl_alloc_queue_gws_args {
+       __u32 queue_id;         /* to KFD */
+       __u32 num_gws;          /* to KFD */
+       __u32 first_gws;        /* from KFD */
+       __u32 pad;
+};
+
 struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
@@ -529,7 +543,10 @@ enum kfd_mmio_remap {
 #define AMDKFD_IOC_IMPORT_DMABUF               \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
 
+#define AMDKFD_IOC_ALLOC_QUEUE_GWS             \
+               AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
+
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x1E
+#define AMDKFD_COMMAND_END             0x1F
 
 #endif
index 30f2a87270dc5622cf616084d542cdc765193a44..4565456c0ef447e44e59791b23be2e702b828054 100644 (file)
@@ -276,6 +276,7 @@ enum nft_rule_compat_attributes {
  * @NFT_SET_TIMEOUT: set uses timeouts
  * @NFT_SET_EVAL: set can be updated from the evaluation path
  * @NFT_SET_OBJECT: set contains stateful objects
+ * @NFT_SET_CONCAT: set contains a concatenation
  */
 enum nft_set_flags {
        NFT_SET_ANONYMOUS               = 0x1,
@@ -285,6 +286,7 @@ enum nft_set_flags {
        NFT_SET_TIMEOUT                 = 0x10,
        NFT_SET_EVAL                    = 0x20,
        NFT_SET_OBJECT                  = 0x40,
+       NFT_SET_CONCAT                  = 0x80,
 };
 
 /**
index 434e6506abaa4b24dab815e8dbfeea6efecee4fd..49ddcdc61c09499909c00c0bd2f5cd91b14c0fed 100644 (file)
@@ -48,6 +48,7 @@ struct idletimer_tg_info_v1 {
 
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
+       __u8 send_nl_msg;   /* unused: for compatibility with Android */
        __u8 timer_type;
 
        /* for kernel module internal use only */
index f02504640e1851e2fc0199042fbac7fe11ba596d..6b12f06ee18c3d7de63b10ec1a76bcefb1121111 100644 (file)
@@ -30,7 +30,7 @@ struct bpf_lru_node {
 struct bpf_lru_list {
        struct list_head lists[NR_BPF_LRU_LIST_T];
        unsigned int counts[NR_BPF_LRU_LIST_COUNT];
-       /* The next inacitve list rotation starts from here */
+       /* The next inactive list rotation starts from here */
        struct list_head *next_inactive_rotation;
 
        raw_spinlock_t lock ____cacheline_aligned_in_smp;
index 64783da342020212f12a1a57212b16e330786b5f..d85f372395407df1d02ab0fe5bbd7009a2289647 100644 (file)
@@ -586,9 +586,7 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
 {
        struct bpf_map *map = vma->vm_file->private_data;
 
-       bpf_map_inc_with_uref(map);
-
-       if (vma->vm_flags & VM_WRITE) {
+       if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt++;
                mutex_unlock(&map->freeze_mutex);
@@ -600,13 +598,11 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
 {
        struct bpf_map *map = vma->vm_file->private_data;
 
-       if (vma->vm_flags & VM_WRITE) {
+       if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt--;
                mutex_unlock(&map->freeze_mutex);
        }
-
-       bpf_map_put_with_uref(map);
 }
 
 static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -635,14 +631,16 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
+       vma->vm_flags &= ~VM_MAYEXEC;
+       if (!(vma->vm_flags & VM_WRITE))
+               /* disallow re-mapping with PROT_WRITE */
+               vma->vm_flags &= ~VM_MAYWRITE;
 
        err = map->ops->map_mmap(map, vma);
        if (err)
                goto out;
 
-       bpf_map_inc_with_uref(map);
-
-       if (vma->vm_flags & VM_WRITE)
+       if (vma->vm_flags & VM_MAYWRITE)
                map->writecnt++;
 out:
        mutex_unlock(&map->freeze_mutex);
index 04c6630cc18f97b53e2d7a39cd9fbc87435f1bef..38cfcf701eeb7d9ec19349167800ad653cd087db 100644 (file)
@@ -1255,8 +1255,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
        reg->type = SCALAR_VALUE;
        reg->var_off = tnum_unknown;
        reg->frameno = 0;
-       reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
-                      true : false;
+       reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks;
        __mark_reg_unbounded(reg);
 }
 
index 4385f3d639f23c2347d31061ede5081cc51616cf..8c700f881d920dfccaf531b7df23314f7bbf4120 100644 (file)
@@ -2605,6 +2605,14 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
        struct clone_args args;
        pid_t *kset_tid = kargs->set_tid;
 
+       BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
+                    CLONE_ARGS_SIZE_VER0);
+       BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
+                    CLONE_ARGS_SIZE_VER1);
+       BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
+                    CLONE_ARGS_SIZE_VER2);
+       BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
+
        if (unlikely(usize > PAGE_SIZE))
                return -E2BIG;
        if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
@@ -2631,7 +2639,8 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                     !valid_signal(args.exit_signal)))
                return -EINVAL;
 
-       if ((args.flags & CLONE_INTO_CGROUP) && args.cgroup < 0)
+       if ((args.flags & CLONE_INTO_CGROUP) &&
+           (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
                return -EINVAL;
 
        *kargs = (struct kernel_clone_args){
index fe40c658f86f851bb175ff1c374e7c808007b04f..453a8a0f48046d090d1ff88b15b28c32ee53cd8c 100644 (file)
@@ -1690,34 +1690,6 @@ out_mput:
        return ret;
 }
 
-/**
- *     setup_irq - setup an interrupt
- *     @irq: Interrupt line to setup
- *     @act: irqaction for the interrupt
- *
- * Used to statically setup interrupts in the early boot process.
- */
-int setup_irq(unsigned int irq, struct irqaction *act)
-{
-       int retval;
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
-               return -EINVAL;
-
-       retval = irq_chip_pm_get(&desc->irq_data);
-       if (retval < 0)
-               return retval;
-
-       retval = __setup_irq(irq, desc, act);
-
-       if (retval)
-               irq_chip_pm_put(&desc->irq_data);
-
-       return retval;
-}
-EXPORT_SYMBOL_GPL(setup_irq);
-
 /*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
@@ -1858,22 +1830,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        return action;
 }
 
-/**
- *     remove_irq - free an interrupt
- *     @irq: Interrupt line to free
- *     @act: irqaction for the interrupt
- *
- * Used to remove interrupts statically setup by the early boot process.
- */
-void remove_irq(unsigned int irq, struct irqaction *act)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
-               __free_irq(desc, act->dev_id);
-}
-EXPORT_SYMBOL_GPL(remove_irq);
-
 /**
  *     free_irq - free an interrupt allocated with request_irq
  *     @irq: Interrupt line to free
index 06548e2ebb72e4928fb483cd7faf692dd86a29a1..d9a49cd6065a20936edbda1b334136ab597cde52 100644 (file)
@@ -825,7 +825,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
                        rcu_cleanup_after_idle();
 
                incby = 1;
-       } else if (tick_nohz_full_cpu(rdp->cpu) &&
+       } else if (irq && tick_nohz_full_cpu(rdp->cpu) &&
                   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
                   READ_ONCE(rdp->rcu_urgent_qs) &&
                   !READ_ONCE(rdp->rcu_forced_tick)) {
index dac9104d126f7d4483a64a46c965bb48fcbdc811..ff9435dee1df218a96d05ac4fc1130f0bca204e1 100644 (file)
@@ -1003,12 +1003,12 @@ u64 kcpustat_field(struct kernel_cpustat *kcpustat,
                   enum cpu_usage_stat usage, int cpu)
 {
        u64 *cpustat = kcpustat->cpustat;
+       u64 val = cpustat[usage];
        struct rq *rq;
-       u64 val;
        int err;
 
        if (!vtime_accounting_enabled_cpu(cpu))
-               return cpustat[usage];
+               return val;
 
        rq = cpu_rq(cpu);
 
index 008d6ac2342b7536fff293b2ce7054cf9d9cfc98..808244f3ddd9842d684503a493cd8a69092cff74 100644 (file)
@@ -149,6 +149,9 @@ __setup("nohz_full=", housekeeping_nohz_full_setup);
 static int __init housekeeping_isolcpus_setup(char *str)
 {
        unsigned int flags = 0;
+       bool illegal = false;
+       char *par;
+       int len;
 
        while (isalpha(*str)) {
                if (!strncmp(str, "nohz,", 5)) {
@@ -169,8 +172,22 @@ static int __init housekeeping_isolcpus_setup(char *str)
                        continue;
                }
 
-               pr_warn("isolcpus: Error, unknown flag\n");
-               return 0;
+               /*
+                * Skip unknown sub-parameter and validate that it does not
+                * contain an invalid character.
+                */
+               for (par = str, len = 0; *str && *str != ','; str++, len++) {
+                       if (!isalpha(*str) && *str != '_')
+                               illegal = true;
+               }
+
+               if (illegal) {
+                       pr_warn("isolcpus: Invalid flag %.*s\n", len, par);
+                       return 0;
+               }
+
+               pr_info("isolcpus: Skipped unknown flag %.*s\n", len, par);
+               str++;
        }
 
        /* Default behaviour for isolcpus without flags */
index e58a6c619824fd5067825c588de5ab7b9749700f..71310488441437f03f01859c930ee0187a1655bc 100644 (file)
@@ -1510,15 +1510,15 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
        unsigned long flags;
        int ret = -EINVAL;
 
+       if (!valid_signal(sig))
+               return ret;
+
        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_errno = errno;
        info.si_code = SI_ASYNCIO;
        *((sigval_t *)&info.si_pid) = addr;
 
-       if (!valid_signal(sig))
-               return ret;
-
        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
@@ -1557,12 +1557,8 @@ static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
 {
        int ret;
 
-       if (pid > 0) {
-               rcu_read_lock();
-               ret = kill_pid_info(sig, info, find_vpid(pid));
-               rcu_read_unlock();
-               return ret;
-       }
+       if (pid > 0)
+               return kill_proc_info(sig, info, pid);
 
        /* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
        if (pid == INT_MIN)
index 3b30288793fe72504c029413304000d91daa82ac..53bce347cd50bc24b1fbebd8fc5c26cc97e6742a 100644 (file)
@@ -338,7 +338,20 @@ static struct user_namespace *timens_owner(struct ns_common *ns)
 
 static void show_offset(struct seq_file *m, int clockid, struct timespec64 *ts)
 {
-       seq_printf(m, "%d %lld %ld\n", clockid, ts->tv_sec, ts->tv_nsec);
+       char *clock;
+
+       switch (clockid) {
+       case CLOCK_BOOTTIME:
+               clock = "boottime";
+               break;
+       case CLOCK_MONOTONIC:
+               clock = "monotonic";
+               break;
+       default:
+               clock = "unknown";
+               break;
+       }
+       seq_printf(m, "%-10s %10lld %9ld\n", clock, ts->tv_sec, ts->tv_nsec);
 }
 
 void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m)
index dd34a1b46a867d22cd50385af8a175dee03f8d35..3a74736da363ae972358ad4e94018c262516c5ce 100644 (file)
@@ -1088,14 +1088,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
                          struct event_trigger_data *data,
                          struct trace_event_file *file)
 {
-       int ret = register_trigger(glob, ops, data, file);
-
-       if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
-               unregister_trigger(glob, ops, data, file);
-               ret = 0;
-       }
+       if (tracing_alloc_snapshot_instance(file->tr) != 0)
+               return 0;
 
-       return ret;
+       return register_trigger(glob, ops, data, file);
 }
 
 static int
index 50c1f5f08e6f17952295dd561e519299776423ae..21d9c5f6e7ec7552be6f46fa352a29394465e3cc 100644 (file)
@@ -242,6 +242,8 @@ config DEBUG_INFO_DWARF4
 config DEBUG_INFO_BTF
        bool "Generate BTF typeinfo"
        depends on DEBUG_INFO
+       depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+       depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
        help
          Generate deduplicated BTF type information from DWARF debug info.
          Turning this on expects presence of pahole tool, which will convert
index a7e282ead43800031af363affaf39ac65ff0b307..c881abeba0bf2429fbb7bdbd990b46dbcd9b4531 100644 (file)
@@ -413,9 +413,20 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                        /* Always put back VM_ACCOUNT since we won't unmap */
                        vma->vm_flags |= VM_ACCOUNT;
 
-                       vm_acct_memory(vma_pages(new_vma));
+                       vm_acct_memory(new_len >> PAGE_SHIFT);
                }
 
+               /*
+                * VMAs can actually be merged back together in copy_vma
+                * calling merge_vma. This can happen with anonymous vmas
+                * which have not yet been faulted, so if we were to consider
+                * this VMA split we'll end up adding VM_ACCOUNT on the
+                * next VMA, which is completely unrelated if this VMA
+                * was re-merged.
+                */
+               if (split && new_vma == vma)
+                       split = 0;
+
                /* We always clear VM_LOCKED[ONFAULT] on the old vma */
                vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
 
index fa53e9f738935605acf538cdcbbf76f41f410169..ac2aecfbc7a828d4494272245809984bab340c3c 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -524,6 +524,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 {
        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
 }
+EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
@@ -531,6 +532,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 {
        return __do_kmalloc_node(size, gfp, node, caller);
 }
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
 void kfree(const void *block)
index 332d4b459a907b6a25e8fef7452e26c61604c2ea..7002af0f013f92d27652325a082945152fe06f13 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4385,6 +4385,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
        return ret;
 }
+EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -4415,6 +4416,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
        return ret;
 }
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
 #ifdef CONFIG_SYSFS
index 9c9e763bfe0e38ff843fa590ff83617a8d2feacc..522288177bbd8ce00d2152c218d7eef6fbcd82ab 100644 (file)
@@ -4140,7 +4140,8 @@ EXPORT_SYMBOL(netdev_max_backlog);
 
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
-unsigned int __read_mostly netdev_budget_usecs = 2000;
+/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
 int weight_p __read_mostly = 64;           /* old backlog weight */
 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
@@ -8666,8 +8667,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
        const struct net_device_ops *ops = dev->netdev_ops;
        enum bpf_netdev_command query;
        u32 prog_id, expected_id = 0;
-       struct bpf_prog *prog = NULL;
        bpf_op_t bpf_op, bpf_chk;
+       struct bpf_prog *prog;
        bool offload;
        int err;
 
@@ -8733,6 +8734,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
        } else {
                if (!prog_id)
                        return 0;
+               prog = NULL;
        }
 
        err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
index 7628b947dbc3aabd289fd6f282c82a7faf8e3256..7d6ceaa54d2147584e2033956c4740e4d0ef4af1 100644 (file)
@@ -5925,7 +5925,7 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
                return -EOPNOTSUPP;
        if (unlikely(dev_net(skb->dev) != sock_net(sk)))
                return -ENETUNREACH;
-       if (unlikely(sk->sk_reuseport))
+       if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
                return -ESOCKTNOSUPPORT;
        if (sk_is_refcounted(sk) &&
            unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
index cf0215734ceb07089b60911a269a53bc2f0f1913..4773ad6ec1114cd1dc364cc7470379289d7034db 100644 (file)
@@ -80,7 +80,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
-       int ret = -EINVAL;
+       int ret;
 
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
index ce1d8dce9b7aba7aa5a7e4b69c09bb6e889fb067..90509c37d29116b14b3b1849222c3a6148a0cb38 100644 (file)
@@ -1872,7 +1872,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                 * as not suitable for copying when cloning.
                 */
                if (sk_user_data_is_nocopy(newsk))
-                       RCU_INIT_POINTER(newsk->sk_user_data, NULL);
+                       newsk->sk_user_data = NULL;
 
                newsk->sk_err      = 0;
                newsk->sk_err_soft = 0;
index 231b2d494f1cb4f43985966b9a3c05e10f4dfb91..a58fdd3625743d5314aa57828019fc96b40cecc6 100644 (file)
@@ -670,11 +670,16 @@ int dsa_port_link_register_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
        struct device_node *phy_np;
+       int port = dp->index;
 
        if (!ds->ops->adjust_link) {
                phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
-               if (of_phy_is_fixed_link(dp->dn) || phy_np)
+               if (of_phy_is_fixed_link(dp->dn) || phy_np) {
+                       if (ds->ops->phylink_mac_link_down)
+                               ds->ops->phylink_mac_link_down(ds, port,
+                                       MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
                        return dsa_port_phylink_register(dp);
+               }
                return 0;
        }
 
index 5465a395da040f2a9e7e0ed57e9509d8c36470ec..1decb25f6764a63578ca0079cd81c487edcc54e1 100644 (file)
@@ -69,10 +69,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        else
                multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
 
-       if (!data[IFLA_HSR_VERSION])
+       if (!data[IFLA_HSR_VERSION]) {
                hsr_version = 0;
-       else
+       } else {
                hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+               if (hsr_version > 1) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only versions 0..1 are supported");
+                       return -EINVAL;
+               }
+       }
 
        return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
 }
index 30fa42f5997dbc942b03d5b71a172b041b5238cf..c0dd561aa19032f8b6637d9b387e4fa404b31828 100644 (file)
@@ -614,12 +614,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
        return NULL;
 }
 
-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+static int ip_mc_autojoin_config(struct net *net, bool join,
+                                const struct in_ifaddr *ifa)
 {
+#if defined(CONFIG_IP_MULTICAST)
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr = ifa->ifa_address,
                .imr_ifindex = ifa->ifa_dev->dev->ifindex,
        };
+       struct sock *sk = net->ipv4.mc_autojoin_sk;
        int ret;
 
        ASSERT_RTNL();
@@ -632,6 +635,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
        release_sock(sk);
 
        return ret;
+#else
+       return -EOPNOTSUPP;
+#endif
 }
 
 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -675,7 +681,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
                        continue;
 
                if (ipv4_is_multicast(ifa->ifa_address))
-                       ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
+                       ip_mc_autojoin_config(net, false, ifa);
                __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
                return 0;
        }
@@ -940,8 +946,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
                 */
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
                if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
-                       int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
-                                              true, ifa);
+                       int ret = ip_mc_autojoin_config(net, true, ifa);
 
                        if (ret < 0) {
                                inet_free_ifa(ifa);
index 2688f3e8216528a116f90f00d433a331ee0ae983..fc5000370030d67094ba11f15aaaaaa7ba519cde 100644 (file)
@@ -229,6 +229,25 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
        return res;
 }
 
+static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
+                                 struct flowi6 *fl6)
+{
+       struct net *net = sock_net(sk);
+       struct dst_entry *dst;
+       bool res = false;
+
+       dst = ip6_route_output(net, sk, fl6);
+       if (!dst->error) {
+               struct rt6_info *rt = (struct rt6_info *)dst;
+               struct in6_addr prefsrc;
+
+               rt6_get_prefsrc(rt, &prefsrc);
+               res = !ipv6_addr_any(&prefsrc);
+       }
+       dst_release(dst);
+       return res;
+}
+
 /*
  *     an inline helper for the "simple" if statement below
  *     checks if parameter problem report is caused by an
@@ -527,7 +546,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                saddr = force_saddr;
        if (saddr) {
                fl6.saddr = *saddr;
-       } else {
+       } else if (!icmpv6_rt_has_prefsrc(sk, type, &fl6)) {
                /* select a more meaningful saddr from input if */
                struct net_device *in_netdev;
 
index 75421a472d25ac4b2cf8df3959b1ea6ec7547827..4c7e0a27fa9cb588ee43fb81f5b2683271bf1754 100644 (file)
@@ -434,7 +434,7 @@ static struct genl_family seg6_genl_family __ro_after_init = {
 
 int __init seg6_init(void)
 {
-       int err = -ENOMEM;
+       int err;
 
        err = genl_register_family(&seg6_genl_family);
        if (err)
index f5a9bdc4980c5b25edf736aaa961269c5cd0c537..ebb381c3f1b9901cf302fc8b3052163eb9ab5504 100644 (file)
@@ -920,51 +920,51 @@ static const struct genl_ops l2tp_nl_ops[] = {
                .cmd = L2TP_CMD_TUNNEL_CREATE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_tunnel_create,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_TUNNEL_DELETE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_tunnel_delete,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_TUNNEL_MODIFY,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_tunnel_modify,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_TUNNEL_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_tunnel_get,
                .dumpit = l2tp_nl_cmd_tunnel_dump,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_SESSION_CREATE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_session_create,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_SESSION_DELETE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_session_delete,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_SESSION_MODIFY,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_session_modify,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
        {
                .cmd = L2TP_CMD_SESSION_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = l2tp_nl_cmd_session_get,
                .dumpit = l2tp_nl_cmd_session_dump,
-               .flags = GENL_ADMIN_PERM,
+               .flags = GENL_UNS_ADMIN_PERM,
        },
 };
 
index 8345926193de487c6f783b0a452ff0c74eba02c1..0e9ad60fb2b3d9dc9a0bcacfb4e8ec08fd112df0 100644 (file)
@@ -1069,7 +1069,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
                if (hw->max_signal <= 0) {
                        result = -EINVAL;
-                       goto fail_wiphy_register;
+                       goto fail_workqueue;
                }
        }
 
@@ -1135,7 +1135,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
        result = ieee80211_init_cipher_suites(local);
        if (result < 0)
-               goto fail_wiphy_register;
+               goto fail_workqueue;
 
        if (!local->ops->remain_on_channel)
                local->hw.wiphy->max_remain_on_channel_duration = 5000;
@@ -1161,10 +1161,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
        local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
 
-       result = wiphy_register(local->hw.wiphy);
-       if (result < 0)
-               goto fail_wiphy_register;
-
        /*
         * We use the number of queues for feature tests (QoS, HT) internally
         * so restrict them appropriately.
@@ -1217,9 +1213,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                goto fail_flows;
 
        rtnl_lock();
-
        result = ieee80211_init_rate_ctrl_alg(local,
                                              hw->rate_control_algorithm);
+       rtnl_unlock();
        if (result < 0) {
                wiphy_debug(local->hw.wiphy,
                            "Failed to initialize rate control algorithm\n");
@@ -1273,6 +1269,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                local->sband_allocated |= BIT(band);
        }
 
+       result = wiphy_register(local->hw.wiphy);
+       if (result < 0)
+               goto fail_wiphy_register;
+
+       rtnl_lock();
+
        /* add one default STA interface if supported */
        if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
            !ieee80211_hw_check(hw, NO_AUTO_VIF)) {
@@ -1312,17 +1314,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 #if defined(CONFIG_INET) || defined(CONFIG_IPV6)
  fail_ifa:
 #endif
+       wiphy_unregister(local->hw.wiphy);
+ fail_wiphy_register:
        rtnl_lock();
        rate_control_deinitialize(local);
        ieee80211_remove_interfaces(local);
- fail_rate:
        rtnl_unlock();
+ fail_rate:
  fail_flows:
        ieee80211_led_exit(local);
        destroy_workqueue(local->workqueue);
  fail_workqueue:
-       wiphy_unregister(local->hw.wiphy);
- fail_wiphy_register:
        if (local->wiphy_ciphers_allocated)
                kfree(local->hw.wiphy->cipher_suites);
        kfree(local->int_scan_req);
@@ -1372,8 +1374,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        skb_queue_purge(&local->skb_queue_unreliable);
        skb_queue_purge(&local->skb_queue_tdls_chsw);
 
-       destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
+       destroy_workqueue(local->workqueue);
        ieee80211_led_exit(local);
        kfree(local->int_scan_req);
 }
index d09b3c789314da9ade860f96892483a334609b2a..36978a0e500017d0c193fb5bfb86a8f61f210206 100644 (file)
@@ -1257,15 +1257,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                    sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
                        mesh_neighbour_update(sdata, mgmt->sa, &elems,
                                              rx_status);
+
+               if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+                   !sdata->vif.csa_active)
+                       ieee80211_mesh_process_chnswitch(sdata, &elems, true);
        }
 
        if (ifmsh->sync_ops)
                ifmsh->sync_ops->rx_bcn_presp(sdata,
                        stype, mgmt, &elems, rx_status);
-
-       if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
-           !sdata->vif.csa_active)
-               ieee80211_mesh_process_chnswitch(sdata, &elems, true);
 }
 
 int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@ -1373,6 +1373,9 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
        ieee802_11_parse_elems(pos, len - baselen, true, &elems,
                               mgmt->bssid, NULL);
 
+       if (!mesh_matches_local(sdata, &elems))
+               return;
+
        ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
        if (!--ifmsh->chsw_ttl)
                fwd_csa = false;
index 939a5045181a5701b50b4348867790b1ef414960..9936e33ac3511eb844ec3075b46bd36cfd7993ab 100644 (file)
@@ -97,12 +97,7 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
        if (likely(!__mptcp_needs_tcp_fallback(msk)))
                return NULL;
 
-       if (msk->subflow) {
-               release_sock((struct sock *)msk);
-               return msk->subflow;
-       }
-
-       return NULL;
+       return msk->subflow;
 }
 
 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
@@ -734,9 +729,10 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        goto out;
        }
 
+fallback:
        ssock = __mptcp_tcp_fallback(msk);
        if (unlikely(ssock)) {
-fallback:
+               release_sock(sk);
                pr_debug("fallback passthrough");
                ret = sock_sendmsg(ssock, msg);
                return ret >= 0 ? ret + copied : (copied ? copied : ret);
@@ -769,8 +765,14 @@ fallback:
                if (ret < 0)
                        break;
                if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
+                       /* Can happen for passive sockets:
+                        * 3WHS negotiated MPTCP, but first packet after is
+                        * plain TCP (e.g. due to middlebox filtering unknown
+                        * options).
+                        *
+                        * Fall back to TCP.
+                        */
                        release_sock(ssk);
-                       ssock = __mptcp_tcp_fallback(msk);
                        goto fallback;
                }
 
@@ -883,6 +885,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        ssock = __mptcp_tcp_fallback(msk);
        if (unlikely(ssock)) {
 fallback:
+               release_sock(sk);
                pr_debug("fallback-read subflow=%p",
                         mptcp_subflow_ctx(ssock->sk));
                copied = sock_recvmsg(ssock, msg, flags);
@@ -1467,12 +1470,11 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
         */
        lock_sock(sk);
        ssock = __mptcp_tcp_fallback(msk);
+       release_sock(sk);
        if (ssock)
                return tcp_setsockopt(ssock->sk, level, optname, optval,
                                      optlen);
 
-       release_sock(sk);
-
        return -EOPNOTSUPP;
 }
 
@@ -1492,12 +1494,11 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
         */
        lock_sock(sk);
        ssock = __mptcp_tcp_fallback(msk);
+       release_sock(sk);
        if (ssock)
                return tcp_getsockopt(ssock->sk, level, optname, optval,
                                      option);
 
-       release_sock(sk);
-
        return -EOPNOTSUPP;
 }
 
index 8dd17589217d71e4e38f6d442434130cadd290e0..340cb955af25c40da445e1f5482e8ec278dbf098 100644 (file)
@@ -86,7 +86,8 @@ find_set_type(const char *name, u8 family, u8 revision)
 {
        struct ip_set_type *type;
 
-       list_for_each_entry_rcu(type, &ip_set_type_list, list)
+       list_for_each_entry_rcu(type, &ip_set_type_list, list,
+                               lockdep_is_held(&ip_set_type_mutex))
                if (STRNCMP(type->name, name) &&
                    (type->family == family ||
                     type->family == NFPROTO_UNSPEC) &&
index 4471393da6d8dc1e0cfece7bc8fb2e69963d29b7..9780bd93b7e4958771e7244a0fb9101231597a80 100644 (file)
@@ -3542,6 +3542,7 @@ cont:
                        continue;
                if (!strcmp(set->name, i->name)) {
                        kfree(set->name);
+                       set->name = NULL;
                        return -ENFILE;
                }
        }
@@ -3961,8 +3962,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
                              NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
                              NFT_SET_MAP | NFT_SET_EVAL |
-                             NFT_SET_OBJECT))
-                       return -EINVAL;
+                             NFT_SET_OBJECT | NFT_SET_CONCAT))
+                       return -EOPNOTSUPP;
                /* Only one of these operations is supported */
                if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
                             (NFT_SET_MAP | NFT_SET_OBJECT))
@@ -4000,7 +4001,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
                if (objtype == NFT_OBJECT_UNSPEC ||
                    objtype > NFT_OBJECT_MAX)
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
        } else if (flags & NFT_SET_OBJECT)
                return -EINVAL;
        else
index 1e70359d633c0eac04745860a9deb6bfd2f2c6df..f1363b8aabba84a63170fc84dbeeff132d58276c 100644 (file)
@@ -29,7 +29,7 @@ void nft_lookup_eval(const struct nft_expr *expr,
 {
        const struct nft_lookup *priv = nft_expr_priv(expr);
        const struct nft_set *set = priv->set;
-       const struct nft_set_ext *ext;
+       const struct nft_set_ext *ext = NULL;
        bool found;
 
        found = set->ops->lookup(nft_net(pkt), set, &regs->data[priv->sreg],
@@ -39,11 +39,13 @@ void nft_lookup_eval(const struct nft_expr *expr,
                return;
        }
 
-       if (set->flags & NFT_SET_MAP)
-               nft_data_copy(&regs->data[priv->dreg],
-                             nft_set_ext_data(ext), set->dlen);
+       if (ext) {
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&regs->data[priv->dreg],
+                                     nft_set_ext_data(ext), set->dlen);
 
-       nft_set_elem_update_expr(ext, regs, pkt);
+               nft_set_elem_update_expr(ext, regs, pkt);
+       }
 }
 
 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
index 32f0fc8be3a4c475f85595bf1fadba92afa600ea..2a81ea4218193aac5ae82a38d770572509c6d14e 100644 (file)
@@ -81,7 +81,6 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
        u32 idx, off;
 
        nft_bitmap_location(set, key, &idx, &off);
-       *ext = NULL;
 
        return nft_bitmap_active(priv->bitmap, idx, off, genmask);
 }
index 3a5552e14f75dbc260d385a365e050ce337d02ef..3ffef454d4699f025fb390c7e60a5bc119e77887 100644 (file)
@@ -218,27 +218,26 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 
        /* Detect overlaps as we descend the tree. Set the flag in these cases:
         *
-        * a1. |__ _ _?  >|__ _ _  (insert start after existing start)
-        * a2. _ _ __>|  ?_ _ __|  (insert end before existing end)
-        * a3. _ _ ___|  ?_ _ _>|  (insert end after existing end)
-        * a4. >|__ _ _   _ _ __|  (insert start before existing end)
+        * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
+        * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
+        * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
         *
         * and clear it later on, as we eventually reach the points indicated by
         * '?' above, in the cases described below. We'll always meet these
         * later, locally, due to tree ordering, and overlaps for the intervals
         * that are the closest together are always evaluated last.
         *
-        * b1. |__ _ _!  >|__ _ _  (insert start after existing end)
-        * b2. _ _ __>|  !_ _ __|  (insert end before existing start)
-        * b3. !_____>|            (insert end after existing start)
+        * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
+        * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
+        * b3. _ _ ___! >|_ _ __|  (insert start after existing end)
         *
-        * Case a4. resolves to b1.:
+        * Case a3. resolves to b3.:
         * - if the inserted start element is the leftmost, because the '0'
         *   element in the tree serves as end element
         * - otherwise, if an existing end is found. Note that end elements are
         *   always inserted after corresponding start elements.
         *
-        * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
+        * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
         * in that order.
         *
         * The flag is also cleared in two special cases:
@@ -262,9 +261,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                        p = &parent->rb_left;
 
                        if (nft_rbtree_interval_start(new)) {
-                               overlap = nft_rbtree_interval_start(rbe) &&
-                                         nft_set_elem_active(&rbe->ext,
-                                                             genmask);
+                               if (nft_rbtree_interval_end(rbe) &&
+                                   nft_set_elem_active(&rbe->ext, genmask))
+                                       overlap = false;
                        } else {
                                overlap = nft_rbtree_interval_end(rbe) &&
                                          nft_set_elem_active(&rbe->ext,
index 75bd0e5dd312e1d9e807f15de87cb4b0e2d3f387..7b2f359bfce46160acc3c29e7a9b4b68bbe3b731 100644 (file)
@@ -346,6 +346,9 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
 
        pr_debug("checkentry targinfo%s\n", info->label);
 
+       if (info->send_nl_msg)
+               return -EOPNOTSUPP;
+
        ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
        if(ret < 0)
        {
index e22092e4a783861a57179a61bc055d0b5e53f1ca..7ed31b5e77e46060b9138e6c8e11cc18febffc24 100644 (file)
@@ -906,20 +906,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
        node = NULL;
        if (addr->sq_node == QRTR_NODE_BCAST) {
-               enqueue_fn = qrtr_bcast_enqueue;
-               if (addr->sq_port != QRTR_PORT_CTRL) {
+               if (addr->sq_port != QRTR_PORT_CTRL &&
+                   qrtr_local_nid != QRTR_NODE_BCAST) {
                        release_sock(sk);
                        return -ENOTCONN;
                }
+               enqueue_fn = qrtr_bcast_enqueue;
        } else if (addr->sq_node == ipc->us.sq_node) {
                enqueue_fn = qrtr_local_enqueue;
        } else {
-               enqueue_fn = qrtr_node_enqueue;
                node = qrtr_node_lookup(addr->sq_node);
                if (!node) {
                        release_sock(sk);
                        return -ECONNRESET;
                }
+               enqueue_fn = qrtr_node_enqueue;
        }
 
        plen = (len + 3) & ~3;
index 50f13f1d4ae091accfbeae36192cc219e9e63aca..071a261fdaabbfbefe2ddc007c162f61e70a1d96 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle.  All rights reserved.
+ * Copyright (c) 2006, 2020 Oracle and/or its affiliates.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -162,12 +162,12 @@ static void rds_message_purge(struct rds_message *rm)
        if (rm->rdma.op_active)
                rds_rdma_free_op(&rm->rdma);
        if (rm->rdma.op_rdma_mr)
-               rds_mr_put(rm->rdma.op_rdma_mr);
+               kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);
 
        if (rm->atomic.op_active)
                rds_atomic_free_op(&rm->atomic);
        if (rm->atomic.op_rdma_mr)
-               rds_mr_put(rm->atomic.op_rdma_mr);
+               kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
 }
 
 void rds_message_put(struct rds_message *rm)
@@ -308,26 +308,20 @@ out:
 /*
  * RDS ops use this to grab SG entries from the rm's sg pool.
  */
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
-                                         int *ret)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
 {
        struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
        struct scatterlist *sg_ret;
 
-       if (WARN_ON(!ret))
-               return NULL;
-
        if (nents <= 0) {
                pr_warn("rds: alloc sgs failed! nents <= 0\n");
-               *ret = -EINVAL;
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        if (rm->m_used_sgs + nents > rm->m_total_sgs) {
                pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
                        rm->m_total_sgs, rm->m_used_sgs, nents);
-               *ret = -ENOMEM;
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        sg_ret = &sg_first[rm->m_used_sgs];
@@ -343,7 +337,6 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        unsigned int i;
        int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
        int extra_bytes = num_sgs * sizeof(struct scatterlist);
-       int ret;
 
        rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
        if (!rm)
@@ -352,10 +345,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
-       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
-       if (!rm->data.op_sg) {
+       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+       if (IS_ERR(rm->data.op_sg)) {
                rds_message_put(rm);
-               return ERR_PTR(ret);
+               return ERR_CAST(rm->data.op_sg);
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index 585e6b3b69ce4c32ec9404ee5ad88065b0979f75..a7ae11846cd7f57c066ec4967d75ca3b5d906c57 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -84,7 +84,7 @@ static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
        if (insert) {
                rb_link_node(&insert->r_rb_node, parent, p);
                rb_insert_color(&insert->r_rb_node, root);
-               refcount_inc(&insert->r_refcount);
+               kref_get(&insert->r_kref);
        }
        return NULL;
 }
@@ -99,10 +99,7 @@ static void rds_destroy_mr(struct rds_mr *mr)
        unsigned long flags;
 
        rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
-                       mr->r_key, refcount_read(&mr->r_refcount));
-
-       if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
-               return;
+                mr->r_key, kref_read(&mr->r_kref));
 
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        if (!RB_EMPTY_NODE(&mr->r_rb_node))
@@ -115,8 +112,10 @@ static void rds_destroy_mr(struct rds_mr *mr)
                mr->r_trans->free_mr(trans_private, mr->r_invalidate);
 }
 
-void __rds_put_mr_final(struct rds_mr *mr)
+void __rds_put_mr_final(struct kref *kref)
 {
+       struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);
+
        rds_destroy_mr(mr);
        kfree(mr);
 }
@@ -140,8 +139,7 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
-               rds_destroy_mr(mr);
-               rds_mr_put(mr);
+               kref_put(&mr->r_kref, __rds_put_mr_final);
                spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
@@ -242,7 +240,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                goto out;
        }
 
-       refcount_set(&mr->r_refcount, 1);
+       kref_init(&mr->r_kref);
        RB_CLEAR_NODE(&mr->r_rb_node);
        mr->r_trans = rs->rs_transport;
        mr->r_sock = rs;
@@ -343,7 +341,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 
        rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
        if (mr_ret) {
-               refcount_inc(&mr->r_refcount);
+               kref_get(&mr->r_kref);
                *mr_ret = mr;
        }
 
@@ -351,7 +349,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 out:
        kfree(pages);
        if (mr)
-               rds_mr_put(mr);
+               kref_put(&mr->r_kref, __rds_put_mr_final);
        return ret;
 }
 
@@ -434,13 +432,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
        if (!mr)
                return -EINVAL;
 
-       /*
-        * call rds_destroy_mr() ourselves so that we're sure it's done by the time
-        * we return.  If we let rds_mr_put() do it it might not happen until
-        * someone else drops their ref.
-        */
-       rds_destroy_mr(mr);
-       rds_mr_put(mr);
+       kref_put(&mr->r_kref, __rds_put_mr_final);
        return 0;
 }
 
@@ -464,6 +456,14 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
                return;
        }
 
+       /* Get a reference so that the MR won't go away before calling
+        * sync_mr() below.
+        */
+       kref_get(&mr->r_kref);
+
+       /* If it is going to be freed, remove it from the tree now so
+        * that no other thread can find it and free it.
+        */
        if (mr->r_use_once || force) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
@@ -477,12 +477,13 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
        if (mr->r_trans->sync_mr)
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
 
+       /* Release the reference held above. */
+       kref_put(&mr->r_kref, __rds_put_mr_final);
+
        /* If the MR was marked as invalidate, this will
         * trigger an async flush. */
-       if (zot_me) {
-               rds_destroy_mr(mr);
-               rds_mr_put(mr);
-       }
+       if (zot_me)
+               kref_put(&mr->r_kref, __rds_put_mr_final);
 }
 
 void rds_rdma_free_op(struct rm_rdma_op *ro)
@@ -490,7 +491,7 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
        unsigned int i;
 
        if (ro->op_odp_mr) {
-               rds_mr_put(ro->op_odp_mr);
+               kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
        } else {
                for (i = 0; i < ro->op_nents; i++) {
                        struct page *page = sg_page(&ro->op_sg[i]);
@@ -664,9 +665,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
        op->op_odp_mr = NULL;
 
        WARN_ON(!nr_pages);
-       op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
-       if (!op->op_sg)
+       op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
+       if (IS_ERR(op->op_sg)) {
+               ret = PTR_ERR(op->op_sg);
                goto out_pages;
+       }
 
        if (op->op_notify || op->op_recverr) {
                /* We allocate an uninitialized notifier here, because
@@ -730,7 +733,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                                goto out_pages;
                        }
                        RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
-                       refcount_set(&local_odp_mr->r_refcount, 1);
+                       kref_init(&local_odp_mr->r_kref);
                        local_odp_mr->r_trans = rs->rs_transport;
                        local_odp_mr->r_sock = rs;
                        local_odp_mr->r_trans_private =
@@ -827,7 +830,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
        if (!mr)
                err = -EINVAL;  /* invalid r_key */
        else
-               refcount_inc(&mr->r_refcount);
+               kref_get(&mr->r_kref);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 
        if (mr) {
@@ -905,9 +908,11 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
        rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
        rm->atomic.op_active = 1;
        rm->atomic.op_recverr = rs->rs_recverr;
-       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
-       if (!rm->atomic.op_sg)
+       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+       if (IS_ERR(rm->atomic.op_sg)) {
+               ret = PTR_ERR(rm->atomic.op_sg);
                goto err;
+       }
 
        /* verify 8 byte-aligned */
        if (args->local_addr & 0x7) {
index e4a6035230836981a286b2b87ea09de361bff3a8..6019b0c004a9df23fbaafd614c8b03492485b294 100644 (file)
@@ -291,7 +291,7 @@ struct rds_incoming {
 
 struct rds_mr {
        struct rb_node          r_rb_node;
-       refcount_t              r_refcount;
+       struct kref             r_kref;
        u32                     r_key;
 
        /* A copy of the creation flags */
@@ -299,19 +299,11 @@ struct rds_mr {
        unsigned int            r_invalidate:1;
        unsigned int            r_write:1;
 
-       /* This is for RDS_MR_DEAD.
-        * It would be nice & consistent to make this part of the above
-        * bit field here, but we need to use test_and_set_bit.
-        */
-       unsigned long           r_state;
        struct rds_sock         *r_sock; /* back pointer to the socket that owns us */
        struct rds_transport    *r_trans;
        void                    *r_trans_private;
 };
 
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD            0
-
 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
 {
        return r_key | (((u64) offset) << 32);
@@ -852,8 +844,7 @@ rds_conn_connecting(struct rds_connection *conn)
 
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
-                                         int *ret);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
                               bool zcopy);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -946,12 +937,7 @@ void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
                    struct cmsghdr *cmsg);
 
-void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
-       if (refcount_dec_and_test(&mr->r_refcount))
-               __rds_put_mr_final(mr);
-}
+void __rds_put_mr_final(struct kref *kref);
 
 static inline bool rds_destroy_pending(struct rds_connection *conn)
 {
index 82dcd8b84fe779ab42c4c81e5a7c1faf94c2158d..68e2bdb08fd099fd930d0ea66ae037af6a3ba8d2 100644 (file)
@@ -1274,9 +1274,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* Attach data to the rm */
        if (payload_len) {
-               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
-               if (!rm->data.op_sg)
+               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+               if (IS_ERR(rm->data.op_sg)) {
+                       ret = PTR_ERR(rm->data.op_sg);
                        goto out;
+               }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
                if (ret)
                        goto out;
index a6c1349e965d94c3ef6361c04298799a25a2f8c3..01135e54d95d2c6f7cffc3010e0c2ca7beb2761c 100644 (file)
@@ -165,15 +165,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                        goto error;
                }
 
-               /* we want to set the don't fragment bit */
-               opt = IPV6_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
-                                       (char *) &opt, sizeof(opt));
-               if (ret < 0) {
-                       _debug("setsockopt failed");
-                       goto error;
-               }
-
                /* Fall through and set IPv4 options too otherwise we don't get
                 * errors from IPv4 packets sent through the IPv6 socket.
                 */
index bad3d242034466d9b3a21b9d92bd26d30d829c4a..90e263c6aa69e441c5c9e1232bab6acc375a663b 100644 (file)
@@ -474,41 +474,21 @@ send_fragmentable:
        skb->tstamp = ktime_get_real();
 
        switch (conn->params.local->srx.transport.family) {
+       case AF_INET6:
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
-               ret = kernel_setsockopt(conn->params.local->socket,
-                                       SOL_IP, IP_MTU_DISCOVER,
-                                       (char *)&opt, sizeof(opt));
-               if (ret == 0) {
-                       ret = kernel_sendmsg(conn->params.local->socket, &msg,
-                                            iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_seconds();
-
-                       opt = IP_PMTUDISC_DO;
-                       kernel_setsockopt(conn->params.local->socket, SOL_IP,
-                                         IP_MTU_DISCOVER,
-                                         (char *)&opt, sizeof(opt));
-               }
-               break;
-
-#ifdef CONFIG_AF_RXRPC_IPV6
-       case AF_INET6:
-               opt = IPV6_PMTUDISC_DONT;
-               ret = kernel_setsockopt(conn->params.local->socket,
-                                       SOL_IPV6, IPV6_MTU_DISCOVER,
-                                       (char *)&opt, sizeof(opt));
-               if (ret == 0) {
-                       ret = kernel_sendmsg(conn->params.local->socket, &msg,
-                                            iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_seconds();
-
-                       opt = IPV6_PMTUDISC_DO;
-                       kernel_setsockopt(conn->params.local->socket,
-                                         SOL_IPV6, IPV6_MTU_DISCOVER,
-                                         (char *)&opt, sizeof(opt));
-               }
+               kernel_setsockopt(conn->params.local->socket,
+                                 SOL_IP, IP_MTU_DISCOVER,
+                                 (char *)&opt, sizeof(opt));
+               ret = kernel_sendmsg(conn->params.local->socket, &msg,
+                                    iov, 2, len);
+               conn->params.peer->last_tx_at = ktime_get_seconds();
+
+               opt = IP_PMTUDISC_DO;
+               kernel_setsockopt(conn->params.local->socket,
+                                 SOL_IP, IP_MTU_DISCOVER,
+                                 (char *)&opt, sizeof(opt));
                break;
-#endif
 
        default:
                BUG();
index f6a3b969ead0d25e422a67121a4ba7d5bf26e1b6..55bd1429678f996a328f2be6fdb0b7e0de771168 100644 (file)
@@ -1667,6 +1667,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
                skb_ext_del(skb, TC_SKB_EXT);
 
                tp = rcu_dereference_bh(fchain->filter_chain);
+               last_executed_chain = fchain->index;
        }
 
        ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
index 467c53a1fb5cf2a5d98513853024d3c6a61ab4a9..d4675e922a8f717682f465fd2c5664ba6bc1e878 100644 (file)
@@ -1065,7 +1065,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released,
        /* Enter fast recovery */
        if (unlikely(retransmitted)) {
                l->ssthresh = max_t(u16, l->window / 2, 300);
-               l->window = l->ssthresh;
+               l->window = min_t(u16, l->ssthresh, l->window);
                return;
        }
        /* Enter slow start */
index 156efce50dbd10e134e13ba28afc802f78e9a4b5..0e989005bdc2126390d5491af5c42632b1e2373c 100644 (file)
@@ -56,9 +56,9 @@ enum {
        TLS_NUM_PROTS,
 };
 
-static struct proto *saved_tcpv6_prot;
+static const struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
-static struct proto *saved_tcpv4_prot;
+static const struct proto *saved_tcpv4_prot;
 static DEFINE_MUTEX(tcpv4_prot_mutex);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static struct proto_ops tls_sw_proto_ops;
index 5fa402144cda460cf4ec8dee94d22cc4059f068e..692bcd35f8094e85758f529e24459d2d10fd7368 100644 (file)
@@ -644,10 +644,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
                                         .len = NL80211_HE_MAX_CAPABILITY_LEN },
 
-       [NL80211_ATTR_FTM_RESPONDER] = {
-               .type = NLA_NESTED,
-               .validation_data = nl80211_ftm_responder_policy,
-       },
+       [NL80211_ATTR_FTM_RESPONDER] =
+               NLA_POLICY_NESTED(nl80211_ftm_responder_policy),
        [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
        [NL80211_ATTR_PEER_MEASUREMENTS] =
                NLA_POLICY_NESTED(nl80211_pmsr_attr_policy),
index fa7bb5e060d0c1bc29053fef34debbfc683c8be5..ed7a6060f73cadc9c0b812898be9132387a70846 100644 (file)
@@ -343,7 +343,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        unsigned int chunks, chunks_per_page;
        u64 addr = mr->addr, size = mr->len;
-       int size_chk, err;
+       int err;
 
        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
@@ -382,8 +382,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
                        return -EINVAL;
        }
 
-       size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
-       if (size_chk < 0)
+       if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
                return -EINVAL;
 
        umem->address = (unsigned long)addr;
index 356f90e4522b4cc39bf05ae4765d0517c43e5dc4..c350108aa38de784b2652bffcc1732191e93ed88 100644 (file)
@@ -131,8 +131,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
                u64 page_start = addr & ~(PAGE_SIZE - 1);
                u64 first_len = PAGE_SIZE - (addr - page_start);
 
-               memcpy(to_buf, from_buf, first_len + metalen);
-               memcpy(next_pg_addr, from_buf + first_len, len - first_len);
+               memcpy(to_buf, from_buf, first_len);
+               memcpy(next_pg_addr, from_buf + first_len,
+                      len + metalen - first_len);
 
                return;
        }
index 9a8cc10cffd0b03522f4781fd0ac3ad83586b569..c71832b2312b94ba31ef060860bb9c6c04717183 100755 (executable)
@@ -25,7 +25,7 @@ my $fix = 0;
 my $warn = 0;
 
 if (! -d ".git") {
-       printf "Warning: can't check if file exists, as this is not a git tree";
+       printf "Warning: can't check if file exists, as this is not a git tree\n";
        exit 0;
 }
 
index 2f3c3a7e1620fb205767ae2f7d8dd32100f92547..ef85f8b7d4a76ebcd0bed42be4f8105eae42a15f 100644 (file)
@@ -13,7 +13,7 @@ dtc-objs      += dtc-lexer.lex.o dtc-parser.tab.o
 HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
 
 ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
-ifneq ($(CHECK_DTBS),)
+ifneq ($(CHECK_DT_BINDING)$(CHECK_DTBS),)
 $(error dtc needs libyaml for DT schema validation support. \
        Install the necessary libyaml development package.)
 endif
index f2d73f04e71d6a7e823904eb1265b3f94f227035..f746ca8fa403c43fc63cfbbe6d637bd06d61ff08 100755 (executable)
@@ -853,7 +853,7 @@ sub output_function_rst(%) {
 
        if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
            # pointer-to-function
-           print $1 . $parameter . ") (" . $2;
+           print $1 . $parameter . ") (" . $2 . ")";
        } else {
            print $type . " " . $parameter;
        }
index 415f3f1c2da042f794608522aeddd772fc706011..d0cde6685627f2ab6d3cc0d6919b7e8a50c10dd0 100644 (file)
@@ -139,6 +139,8 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
        n = key_serial_next(p, v);
        if (n)
                *_pos = key_node_serial(n);
+       else
+               (*_pos)++;
        return n;
 }
 
index 70ecdc78efbd9f148d5a6744f4981ded87e5aac3..c21b922e5ebec1c851c74c22d43e17c39ff0c4e8 100644 (file)
@@ -1035,14 +1035,14 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
        if (!str)
                return -ENOMEM;
 
-       /* it's expected the caller should free the str */
-       *strp = str;
-
        rc = next_entry(str, fp, len);
-       if (rc)
+       if (rc) {
+               kfree(str);
                return rc;
+       }
 
        str[len] = '\0';
+       *strp = str;
        return 0;
 }
 
index 4ca6b09056f396e7d2cfd8e4ca7ba63a2584e167..3bc9224d5e4fee31b7d133bc84e2bfdb966beb50 100644 (file)
@@ -21,16 +21,17 @@ config SND_HDA_EXT_CORE
        select SND_HDA_CORE
 
 config SND_HDA_PREALLOC_SIZE
-       int "Pre-allocated buffer size for HD-audio driver" if !SND_DMA_SGBUF
+       int "Pre-allocated buffer size for HD-audio driver"
        range 0 32768
-       default 0 if SND_DMA_SGBUF
+       default 2048 if SND_DMA_SGBUF
        default 64 if !SND_DMA_SGBUF
        help
          Specifies the default pre-allocated buffer-size in kB for the
          HD-audio driver.  A larger buffer (e.g. 2048) is preferred
          for systems using PulseAudio.  The default 64 is chosen just
          for compatibility reasons.
-         On x86 systems, the default is zero as we need no preallocation.
+         On x86 systems, the default is 2048 as a reasonable value for
+         most of modern systems.
 
          Note that the pre-allocation size can be changed dynamically
          via a proc file (/proc/asound/card*/pcm*/sub*/prealloc), too.
index 6e3177bcc7097954ae2e26cec1e7620e394b7088..015c0d676897a48b8e02bc29e88557d8c9b1afb0 100644 (file)
@@ -168,7 +168,7 @@ static int src_get_rsc_ctrl_blk(void **rblk)
 
 static int src_put_rsc_ctrl_blk(void *blk)
 {
-       kfree((struct src_rsc_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -494,7 +494,7 @@ static int src_mgr_get_ctrl_blk(void **rblk)
 
 static int src_mgr_put_ctrl_blk(void *blk)
 {
-       kfree((struct src_mgr_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -515,7 +515,7 @@ static int srcimp_mgr_get_ctrl_blk(void **rblk)
 
 static int srcimp_mgr_put_ctrl_blk(void *blk)
 {
-       kfree((struct srcimp_mgr_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -702,7 +702,7 @@ static int amixer_rsc_get_ctrl_blk(void **rblk)
 
 static int amixer_rsc_put_ctrl_blk(void *blk)
 {
-       kfree((struct amixer_rsc_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -909,7 +909,7 @@ static int dai_get_ctrl_blk(void **rblk)
 
 static int dai_put_ctrl_blk(void *blk)
 {
-       kfree((struct dai_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -958,7 +958,7 @@ static int dao_get_ctrl_blk(void **rblk)
 
 static int dao_put_ctrl_blk(void *blk)
 {
-       kfree((struct dao_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
@@ -1156,7 +1156,7 @@ static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
 
 static int daio_mgr_put_ctrl_blk(void *blk)
 {
-       kfree((struct daio_mgr_ctrl_blk *)blk);
+       kfree(blk);
 
        return 0;
 }
index a34a2c9f4bcf4c12e20d0097a924fe67bf327090..86a632bf4d50a9bc70bd2250a57b45600fb0e52a 100644 (file)
@@ -2951,7 +2951,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 static int hda_codec_force_resume(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
-       bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used;
+       bool forced_resume = hda_codec_need_resume(codec);
        int ret;
 
        /* The get/put pair below enforces the runtime resume even if the
index bd093593f8fbdff5b1735e81c53a6d386f673906..a5fab12defde2a648c469c45fdcc05f6f06bdffa 100644 (file)
@@ -1027,7 +1027,7 @@ static int azx_suspend(struct device *dev)
        chip = card->private_data;
        bus = azx_bus(chip);
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-       __azx_runtime_suspend(chip);
+       pm_runtime_force_suspend(dev);
        if (bus->irq >= 0) {
                free_irq(bus->irq, chip);
                bus->irq = -1;
@@ -1044,7 +1044,9 @@ static int azx_suspend(struct device *dev)
 static int azx_resume(struct device *dev)
 {
        struct snd_card *card = dev_get_drvdata(dev);
+       struct hda_codec *codec;
        struct azx *chip;
+       bool forced_resume = false;
 
        if (!azx_is_pm_ready(card))
                return 0;
@@ -1055,7 +1057,20 @@ static int azx_resume(struct device *dev)
                        chip->msi = 0;
        if (azx_acquire_irq(chip, 1) < 0)
                return -EIO;
-       __azx_runtime_resume(chip, false);
+
+       /* check for the forced resume */
+       list_for_each_codec(codec, &chip->bus) {
+               if (hda_codec_need_resume(codec)) {
+                       forced_resume = true;
+                       break;
+               }
+       }
+
+       if (forced_resume)
+               pm_runtime_get_noresume(dev);
+       pm_runtime_force_resume(dev);
+       if (forced_resume)
+               pm_runtime_put(dev);
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
@@ -1071,6 +1086,8 @@ static int azx_freeze_noirq(struct device *dev)
        struct azx *chip = card->private_data;
        struct pci_dev *pci = to_pci_dev(dev);
 
+       if (!azx_is_pm_ready(card))
+               return 0;
        if (chip->driver_type == AZX_DRIVER_SKL)
                pci_set_power_state(pci, PCI_D3hot);
 
@@ -1083,6 +1100,8 @@ static int azx_thaw_noirq(struct device *dev)
        struct azx *chip = card->private_data;
        struct pci_dev *pci = to_pci_dev(dev);
 
+       if (!azx_is_pm_ready(card))
+               return 0;
        if (chip->driver_type == AZX_DRIVER_SKL)
                pci_set_power_state(pci, PCI_D0);
 
@@ -1098,12 +1117,12 @@ static int azx_runtime_suspend(struct device *dev)
        if (!azx_is_pm_ready(card))
                return 0;
        chip = card->private_data;
-       if (!azx_has_pm_runtime(chip))
-               return 0;
 
        /* enable controller wake up event */
-       azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
-                 STATESTS_INT_MASK);
+       if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) {
+               azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+                          STATESTS_INT_MASK);
+       }
 
        __azx_runtime_suspend(chip);
        trace_azx_runtime_suspend(chip);
@@ -1114,17 +1133,18 @@ static int azx_runtime_resume(struct device *dev)
 {
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
+       bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0;
 
        if (!azx_is_pm_ready(card))
                return 0;
        chip = card->private_data;
-       if (!azx_has_pm_runtime(chip))
-               return 0;
-       __azx_runtime_resume(chip, true);
+       __azx_runtime_resume(chip, from_rt);
 
        /* disable controller Wake Up event*/
-       azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
-                       ~STATESTS_INT_MASK);
+       if (from_rt) {
+               azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+                          ~STATESTS_INT_MASK);
+       }
 
        trace_azx_runtime_resume(chip);
        return 0;
@@ -1199,10 +1219,8 @@ static void azx_vs_set_state(struct pci_dev *pci,
                if (!disabled) {
                        dev_info(chip->card->dev,
                                 "Start delayed initialization\n");
-                       if (azx_probe_continue(chip) < 0) {
+                       if (azx_probe_continue(chip) < 0)
                                dev_err(chip->card->dev, "initialization error\n");
-                               hda->init_failed = true;
-                       }
                }
        } else {
                dev_info(chip->card->dev, "%s via vga_switcheroo\n",
@@ -1335,12 +1353,15 @@ static int register_vga_switcheroo(struct azx *chip)
 /*
  * destructor
  */
-static int azx_free(struct azx *chip)
+static void azx_free(struct azx *chip)
 {
        struct pci_dev *pci = chip->pci;
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
 
+       if (hda->freed)
+               return;
+
        if (azx_has_pm_runtime(chip) && chip->running)
                pm_runtime_get_noresume(&pci->dev);
        chip->running = 0;
@@ -1384,9 +1405,8 @@ static int azx_free(struct azx *chip)
 
        if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT)
                snd_hdac_i915_exit(bus);
-       kfree(hda);
 
-       return 0;
+       hda->freed = 1;
 }
 
 static int azx_dev_disconnect(struct snd_device *device)
@@ -1402,7 +1422,8 @@ static int azx_dev_disconnect(struct snd_device *device)
 
 static int azx_dev_free(struct snd_device *device)
 {
-       return azx_free(device->device_data);
+       azx_free(device->device_data);
+       return 0;
 }
 
 #ifdef SUPPORT_VGA_SWITCHEROO
@@ -1769,7 +1790,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        if (err < 0)
                return err;
 
-       hda = kzalloc(sizeof(*hda), GFP_KERNEL);
+       hda = devm_kzalloc(&pci->dev, sizeof(*hda), GFP_KERNEL);
        if (!hda) {
                pci_disable_device(pci);
                return -ENOMEM;
@@ -1810,7 +1831,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 
        err = azx_bus_init(chip, model[dev]);
        if (err < 0) {
-               kfree(hda);
                pci_disable_device(pci);
                return err;
        }
@@ -2005,7 +2025,7 @@ static int azx_first_init(struct azx *chip)
        /* codec detection */
        if (!azx_bus(chip)->codec_mask) {
                dev_err(card->dev, "no codecs found!\n");
-               return -ENODEV;
+               /* keep running the rest for the runtime PM */
        }
 
        if (azx_acquire_irq(chip, 0) < 0)
@@ -2027,24 +2047,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
 {
        struct snd_card *card = context;
        struct azx *chip = card->private_data;
-       struct pci_dev *pci = chip->pci;
-
-       if (!fw) {
-               dev_err(card->dev, "Cannot load firmware, aborting\n");
-               goto error;
-       }
 
-       chip->fw = fw;
+       if (fw)
+               chip->fw = fw;
+       else
+               dev_err(card->dev, "Cannot load firmware, continue without patching\n");
        if (!chip->disabled) {
                /* continue probing */
-               if (azx_probe_continue(chip))
-                       goto error;
+               azx_probe_continue(chip);
        }
-       return; /* OK */
-
- error:
-       snd_card_free(card);
-       pci_set_drvdata(pci, NULL);
 }
 #endif
 
@@ -2308,9 +2319,11 @@ static int azx_probe_continue(struct azx *chip)
 #endif
 
        /* create codec instances */
-       err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
-       if (err < 0)
-               goto out_free;
+       if (bus->codec_mask) {
+               err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
+               if (err < 0)
+                       goto out_free;
+       }
 
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
        if (chip->fw) {
@@ -2324,7 +2337,7 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 #endif
-       if ((probe_only[dev] & 1) == 0) {
+       if (bus->codec_mask && !(probe_only[dev] & 1)) {
                err = azx_codec_configure(chip);
                if (err < 0)
                        goto out_free;
@@ -2341,17 +2354,23 @@ static int azx_probe_continue(struct azx *chip)
 
        set_default_power_save(chip);
 
-       if (azx_has_pm_runtime(chip))
+       if (azx_has_pm_runtime(chip)) {
+               pm_runtime_use_autosuspend(&pci->dev);
+               pm_runtime_allow(&pci->dev);
                pm_runtime_put_autosuspend(&pci->dev);
+       }
 
 out_free:
-       if (err < 0 || !hda->need_i915_power)
+       if (err < 0) {
+               azx_free(chip);
+               return err;
+       }
+
+       if (!hda->need_i915_power)
                display_power(chip, false);
-       if (err < 0)
-               hda->init_failed = 1;
        complete_all(&hda->probe_wait);
        to_hda_bus(bus)->bus_probing = 0;
-       return err;
+       return 0;
 }
 
 static void azx_remove(struct pci_dev *pci)
index 2acfff3da1a04797c6bb4a7d2880ad58b58ec621..3fb119f090408865e2a380292807469b9bd8d369 100644 (file)
@@ -27,6 +27,7 @@ struct hda_intel {
        unsigned int use_vga_switcheroo:1;
        unsigned int vga_switcheroo_registered:1;
        unsigned int init_failed:1; /* delayed init failed */
+       unsigned int freed:1; /* resources already released */
 
        bool need_i915_power:1; /* the hda controller needs i915 power */
 };
index de2826f90d343c06ad974ef79ca9cb3123c558d4..dc5557d79c43e079dbeb42b90ca8a96882f1d79e 100644 (file)
@@ -7378,6 +7378,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
index 721d12130d0cb4aae2b943e8fb75153212032331..e7b9040a54e65ba6e697934a0b649c389e58df29 100644 (file)
@@ -1457,7 +1457,7 @@ error:
                usb_audio_err(chip,
                        "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
                        UAC_GET_CUR, validx, idx, cval->val_type);
-               return ret;
+               return filter_error(cval, ret);
        }
 
        ucontrol->value.integer.value[0] = val;
@@ -1771,11 +1771,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
 
 /* Build a mixer control for a UAC connector control (jack-detect) */
 static void build_connector_control(struct usb_mixer_interface *mixer,
+                                   const struct usbmix_name_map *imap,
                                    struct usb_audio_term *term, bool is_input)
 {
        struct snd_kcontrol *kctl;
        struct usb_mixer_elem_info *cval;
 
+       if (check_ignored_ctl(find_map(imap, term->id, 0)))
+               return;
+
        cval = kzalloc(sizeof(*cval), GFP_KERNEL);
        if (!cval)
                return;
@@ -2109,8 +2113,9 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
        check_input_term(state, term_id, &iterm);
 
        /* Check for jack detection. */
-       if (uac_v2v3_control_is_readable(bmctls, control))
-               build_connector_control(state->mixer, &iterm, true);
+       if ((iterm.type & 0xff00) != 0x0100 &&
+           uac_v2v3_control_is_readable(bmctls, control))
+               build_connector_control(state->mixer, state->map, &iterm, true);
 
        return 0;
 }
@@ -3071,13 +3076,13 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
                memset(&iterm, 0, sizeof(iterm));
                iterm.id = UAC3_BADD_IT_ID4;
                iterm.type = UAC_BIDIR_TERMINAL_HEADSET;
-               build_connector_control(mixer, &iterm, true);
+               build_connector_control(mixer, map->map, &iterm, true);
 
                /* Output Term - Insertion control */
                memset(&oterm, 0, sizeof(oterm));
                oterm.id = UAC3_BADD_OT_ID3;
                oterm.type = UAC_BIDIR_TERMINAL_HEADSET;
-               build_connector_control(mixer, &oterm, false);
+               build_connector_control(mixer, map->map, &oterm, false);
        }
 
        return 0;
@@ -3106,7 +3111,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
                if (map->id == state.chip->usb_id) {
                        state.map = map->map;
                        state.selector_map = map->selector_map;
-                       mixer->ignore_ctl_error = map->ignore_ctl_error;
+                       mixer->ignore_ctl_error |= map->ignore_ctl_error;
                        break;
                }
        }
@@ -3149,10 +3154,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
                        if (err < 0 && err != -EINVAL)
                                return err;
 
-                       if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
+                       if ((state.oterm.type & 0xff00) != 0x0100 &&
+                           uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
                                                         UAC2_TE_CONNECTOR)) {
-                               build_connector_control(state.mixer, &state.oterm,
-                                                       false);
+                               build_connector_control(state.mixer, state.map,
+                                                       &state.oterm, false);
                        }
                } else {  /* UAC_VERSION_3 */
                        struct uac3_output_terminal_descriptor *desc = p;
@@ -3174,10 +3180,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
                        if (err < 0 && err != -EINVAL)
                                return err;
 
-                       if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
+                       if ((state.oterm.type & 0xff00) != 0x0100 &&
+                           uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
                                                         UAC3_TE_INSERTION)) {
-                               build_connector_control(state.mixer, &state.oterm,
-                                                       false);
+                               build_connector_control(state.mixer, state.map,
+                                                       &state.oterm, false);
                        }
                }
        }
index 72b575c348605aaf91511eeb590bcaea4a88f26b..b4e77000f441e96ed86e81e8ec3916290208e54e 100644 (file)
@@ -360,9 +360,11 @@ static const struct usbmix_name_map corsair_virtuoso_map[] = {
 };
 
 /* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX
- * response for Input Gain Pad (id=19, control=12).  Skip it.
+ * response for Input Gain Pad (id=19, control=12) and the connector status
+ * for SPDIF terminal (id=18).  Skip them.
  */
 static const struct usbmix_name_map asus_rog_map[] = {
+       { 18, NULL }, /* OT, connector control */
        { 19, NULL, 12 }, /* FU, Input Gain Pad */
        {}
 };
index f3327cb56edfe163d1a8fc0a45b89fb324573243..db189945e9b0c599b4bda0e44f2372f7140e2085 100644 (file)
 #define X86_FEATURE_IBRS               ( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
 #define X86_FEATURE_L1TF_PTEINV                ( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED      ( 7*32+30) /* Enhanced IBRS */
 #define X86_FEATURE_MSR_IA32_FEAT_CTL  ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
 #define X86_FEATURE_CQM_MBM_LOCAL      (11*32+ 3) /* LLC Local MBM monitoring */
 #define X86_FEATURE_FENCE_SWAPGS_USER  (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL        (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_SPLIT_LOCK_DETECT  (11*32+ 6) /* #AC for split lock */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX512_BF16                (12*32+ 5) /* AVX512 BFLOAT16 instructions */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_AMD_STIBP_ALWAYS_ON        (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_PPIN           (13*32+23) /* Protected Processor Inventory Number */
 #define X86_FEATURE_AMD_SSBD           (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_FLUSH_L1D          (18*32+28) /* Flush L1D cache */
 #define X86_FEATURE_ARCH_CAPABILITIES  (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_CORE_CAPABILITIES  (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
 #define X86_FEATURE_SPEC_CTRL_SSBD     (18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
index d5e517d1c3ddc5c9ac6e594500d87524168b143d..12c9684d59ba6fea4df36257c7f1b8eb89ab5943 100644 (file)
 
 /* Intel MSRs. Some also available on other CPUs */
 
+#define MSR_TEST_CTRL                          0x00000033
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT    29
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT                BIT(MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT)
+
 #define MSR_IA32_SPEC_CTRL             0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS                 BIT(0)     /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP_SHIFT          1          /* Single Thread Indirect Branch Predictor (STIBP) bit */
  */
 #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK      (~0x03U)
 
+/* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */
+#define MSR_IA32_CORE_CAPS                       0x000000cf
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT  5
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT     BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)
+
 #define MSR_PKG_CST_CONFIG_CONTROL     0x000000e2
 #define NHM_C3_AUTO_DEMOTE             (1UL << 25)
 #define NHM_C1_AUTO_DEMOTE             (1UL << 26)
index 2a7befbd11ad54de08160cf47dd970c7a6175e81..0fe0d584c57ea5aa2a9f915120fa24ff78d972ad 100644 (file)
@@ -591,6 +591,8 @@ int do_struct_ops(int argc, char **argv)
 
        err = cmd_select(cmds, argc, argv, do_help);
 
-       btf__free(btf_vmlinux);
+       if (!IS_ERR(btf_vmlinux))
+               btf__free(btf_vmlinux);
+
        return err;
 }
index 669d69441a625c1639d0699671b9ee41405e84a1..4671fbf28842718fa74c9664dee74f8e129dbb7e 100644 (file)
@@ -3,9 +3,9 @@
 #define __LINUX_BITS_H
 
 #include <linux/const.h>
+#include <vdso/bits.h>
 #include <asm/bitsperlong.h>
 
-#define BIT(nr)                        (UL(1) << (nr))
 #define BIT_ULL(nr)            (ULL(1) << (nr))
 #define BIT_MASK(nr)           (UL(1) << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#define GENMASK(h, l) \
+#if !defined(__ASSEMBLY__) && \
+       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#include <linux/build_bug.h>
+#define GENMASK_INPUT_CHECK(h, l) \
+       (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+               __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+#else
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+#define GENMASK_INPUT_CHECK(h, l) 0
+#endif
+
+#define __GENMASK(h, l) \
        (((~UL(0)) - (UL(1) << (l)) + 1) & \
         (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK(h, l) \
+       (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
 
-#define GENMASK_ULL(h, l) \
+#define __GENMASK_ULL(h, l) \
        (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
         (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+       (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
 
 #endif /* __LINUX_BITS_H */
diff --git a/tools/include/linux/build_bug.h b/tools/include/linux/build_bug.h
new file mode 100644 (file)
index 0000000..cc7070c
--- /dev/null
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef __CHECKER__
+#define BUILD_BUG_ON_ZERO(e) (0)
+#else /* __CHECKER__ */
+/*
+ * Force a compilation error if condition is true, but also produce a
+ * result (of value 0 and type int), so the expression can be used
+ * e.g. in a structure initializer (or where-ever else comma expressions
+ * aren't permitted).
+ */
+#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
+#endif /* __CHECKER__ */
+
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n)       \
+       BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n)                 \
+       BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
+/**
+ * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
+ *                   error message.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * See BUILD_BUG_ON for description.
+ */
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ */
+#define BUILD_BUG_ON(condition) \
+       BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+
+/**
+ * BUILD_BUG - break compile if used.
+ *
+ * If you have some code that you expect the compiler to eliminate at
+ * build time, you should use BUILD_BUG to detect if it is
+ * unexpectedly used.
+ */
+#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+
+/**
+ * static_assert - check integer constant expression at build time
+ *
+ * static_assert() is a wrapper for the C11 _Static_assert, with a
+ * little macro magic to make the message optional (defaulting to the
+ * stringification of the tested expression).
+ *
+ * Contrary to BUILD_BUG_ON(), static_assert() can be used at global
+ * scope, but requires the expression to be an integer constant
+ * expression (i.e., it is not enough that __builtin_constant_p() is
+ * true for expr).
+ *
+ * Also note that BUILD_BUG_ON() fails the build if the condition is
+ * true, while static_assert() fails the build if the expression is
+ * false.
+ */
+#ifndef static_assert
+#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#endif // static_assert
+
+#endif /* _LINUX_BUILD_BUG_H */
index 1827c2f973f93c533ca9a3e12ced636d13319556..180f7714a5f10c75e53ffd883b0b6f680cdb6a37 100644 (file)
 # define __compiletime_error(message)
 #endif
 
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix)          \
+       do {                                                            \
+               extern void prefix ## suffix(void) __compiletime_error(msg); \
+               if (!(condition))                                       \
+                       prefix ## suffix();                             \
+       } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+       __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg:       a message to emit if condition is false
+ *
+ * In tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+       _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
index 7b55a55f59112ccaec93486899f42b8619be8729..81b8aae5a8559c9fabf0665c48d8e8659db4f239 100644 (file)
@@ -1,9 +1,6 @@
 #ifndef _LINUX_CONST_H
 #define _LINUX_CONST_H
 
-#include <uapi/linux/const.h>
-
-#define UL(x)          (_UL(x))
-#define ULL(x)         (_ULL(x))
+#include <vdso/const.h>
 
 #endif /* _LINUX_CONST_H */
index cba226948a0cec1794a5756c25c4f75c5d7dde76..a7e54a08fb54c41b7b5da231119caab66d5431cc 100644 (file)
@@ -5,6 +5,7 @@
 #include <stdarg.h>
 #include <stddef.h>
 #include <assert.h>
+#include <linux/build_bug.h>
 #include <linux/compiler.h>
 #include <endian.h>
 #include <byteswap.h>
@@ -35,9 +36,6 @@
        (type *)((char *)__mptr - offsetof(type, member)); })
 #endif
 
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-
 #ifndef max
 #define max(x, y) ({                           \
        typeof(x) _max1 = (x);                  \
index 868bf7996c0f8920e589b58b192528c5a602a9fe..808b48a93330bad83759a3cd81331f812296a12a 100644 (file)
@@ -948,6 +948,8 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_TRANSFER     DRM_IOWR(0xCC, struct drm_syncobj_transfer)
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL      DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
 
+#define DRM_IOCTL_MODE_GETFB2          DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
index 829c0a48577f8b942b7efbf4d7a6438c7d72a670..2813e579b480cc5e6c10e85663b64ac7dafe9102 100644 (file)
@@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param {
  * By default, new contexts allow persistence.
  */
 #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/*
+ * I915_CONTEXT_PARAM_RINGSIZE:
+ *
+ * Sets the size of the CS ringbuffer to use for logical ring contexts. This
+ * applies a limit of how many batches can be queued to HW before the caller
+ * is blocked due to lack of space for more commands.
+ *
+ * Only reliably possible to be set prior to first use, i.e. during
+ * construction. At any later point, the current execution must be flushed as
+ * the ring can only be changed while the context is idle. Note, the ringsize
+ * can be specified as a constructor property, see
+ * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
+ *
+ * Only applies to the current set of engine and lost when those engines
+ * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
+ *
+ * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
+ * Default is 16 KiB.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE    0xc
 /* Must be kept compact -- no holes and well documented */
 
        __u64 value;
index 0d8a6f47711c32eef4701ad9d9d6936f3a01d9c9..a10e3cdc283948980c5dfa0b3bc050eddc9cd2ec 100644 (file)
@@ -163,6 +163,7 @@ struct fscrypt_get_key_status_arg {
 #define FS_IOC_REMOVE_ENCRYPTION_KEY           _IOWR('f', 24, struct fscrypt_remove_key_arg)
 #define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
 #define FS_IOC_GET_ENCRYPTION_KEY_STATUS       _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+#define FS_IOC_GET_ENCRYPTION_NONCE            _IOR('f', 27, __u8[16])
 
 /**********************************************************************/
 
index 4b95f9a31a2f5e227f57f4cbba907c0508c5e3a9..428c7dde6b4b3761f9df65ed9181b50e1c9f7165 100644 (file)
@@ -474,12 +474,17 @@ struct kvm_s390_mem_op {
        __u32 size;             /* amount of bytes */
        __u32 op;               /* type of operation */
        __u64 buf;              /* buffer in userspace */
-       __u8 ar;                /* the access register number */
-       __u8 reserved[31];      /* should be set to 0 */
+       union {
+               __u8 ar;        /* the access register number */
+               __u32 sida_offset; /* offset into the sida */
+               __u8 reserved[32]; /* should be set to 0 */
+       };
 };
 /* types for kvm_s390_mem_op->op */
 #define KVM_S390_MEMOP_LOGICAL_READ    0
 #define KVM_S390_MEMOP_LOGICAL_WRITE   1
+#define KVM_S390_MEMOP_SIDA_READ       2
+#define KVM_S390_MEMOP_SIDA_WRITE      3
 /* flags for kvm_s390_mem_op->flags */
 #define KVM_S390_MEMOP_F_CHECK_ONLY            (1ULL << 0)
 #define KVM_S390_MEMOP_F_INJECT_EXCEPTION      (1ULL << 1)
@@ -1010,6 +1015,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_NISV_TO_USER 177
 #define KVM_CAP_ARM_INJECT_EXT_DABT 178
 #define KVM_CAP_S390_VCPU_RESETS 179
+#define KVM_CAP_S390_PROTECTED 180
+#define KVM_CAP_PPC_SECURE_GUEST 181
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1478,6 +1485,39 @@ struct kvm_enc_region {
 #define KVM_S390_NORMAL_RESET  _IO(KVMIO,   0xc3)
 #define KVM_S390_CLEAR_RESET   _IO(KVMIO,   0xc4)
 
+struct kvm_s390_pv_sec_parm {
+       __u64 origin;
+       __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+       __u64 addr;
+       __u64 size;
+       __u64 tweak;
+};
+
+enum pv_cmd_id {
+       KVM_PV_ENABLE,
+       KVM_PV_DISABLE,
+       KVM_PV_SET_SEC_PARMS,
+       KVM_PV_UNPACK,
+       KVM_PV_VERIFY,
+       KVM_PV_PREP_RESET,
+       KVM_PV_UNSHARE_ALL,
+};
+
+struct kvm_pv_cmd {
+       __u32 cmd;      /* Command to be executed */
+       __u16 rc;       /* Ultravisor return code */
+       __u16 rrc;      /* Ultravisor return reason code */
+       __u64 data;     /* Data or address */
+       __u32 flags;    /* flags for future extensions. Must be 0 for now */
+       __u32 reserved[3];
+};
+
+/* Available with KVM_CAP_S390_PROTECTED */
+#define KVM_S390_PV_COMMAND            _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
+
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
        /* Guest initialization commands */
@@ -1628,4 +1668,7 @@ struct kvm_hyperv_eventfd {
 #define KVM_HYPERV_CONN_ID_MASK                0x00ffffff
 #define KVM_HYPERV_EVENTFD_DEASSIGN    (1 << 0)
 
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE    (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET            (1 << 1)
+
 #endif /* __LINUX_KVM_H */
index fc1a64c3447bf6e329bba5e155053332794a0187..923cc162609c016dd6360d834b34abfe6e1ba459 100644 (file)
@@ -5,8 +5,9 @@
 #include <asm/mman.h>
 #include <asm-generic/hugetlb_encode.h>
 
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED   2
+#define MREMAP_MAYMOVE         1
+#define MREMAP_FIXED           2
+#define MREMAP_DONTUNMAP       4
 
 #define OVERCOMMIT_GUESS               0
 #define OVERCOMMIT_ALWAYS              1
index 2e3bc22c6f202f6280ef7279de60fa966b84d3ed..3bac0a8ceab26ee78c37b425a76115aa21c20c06 100644 (file)
@@ -35,6 +35,7 @@
 
 /* Flags for the clone3() syscall. */
 #define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
 
 /*
  * cloning flags intersect with CSIGNAL so can be used with unshare and clone3
@@ -81,6 +82,8 @@
  * @set_tid_size: This defines the size of the array referenced
  *                in @set_tid. This cannot be larger than the
  *                kernel's limit of nested PID namespaces.
+ * @cgroup:       If CLONE_INTO_CGROUP is specified set this to
+ *                a file descriptor for the cgroup.
  *
  * The structure is versioned by size and thus extensible.
  * New struct members must go at the end of the struct and
@@ -97,11 +100,13 @@ struct clone_args {
        __aligned_u64 tls;
        __aligned_u64 set_tid;
        __aligned_u64 set_tid_size;
+       __aligned_u64 cgroup;
 };
 #endif
 
 #define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 #define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
 
 /*
  * Scheduling policies
index 40d028eed645954cbc3e4699aa2c353be371ce7f..9fe72e4b1373165d7a7aeff6141082dd0e8269e6 100644 (file)
 #define VHOST_VSOCK_SET_GUEST_CID      _IOW(VHOST_VIRTIO, 0x60, __u64)
 #define VHOST_VSOCK_SET_RUNNING                _IOW(VHOST_VIRTIO, 0x61, int)
 
+/* VHOST_VDPA specific defines */
+
+/* Get the device id. The device ids follow the same definition of
+ * the device id defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_DEVICE_ID       _IOR(VHOST_VIRTIO, 0x70, __u32)
+/* Get and set the status. The status bits follow the same definition
+ * of the device status defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_STATUS          _IOR(VHOST_VIRTIO, 0x71, __u8)
+#define VHOST_VDPA_SET_STATUS          _IOW(VHOST_VIRTIO, 0x72, __u8)
+/* Get and set the device config. The device config follows the same
+ * definition of the device config defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_CONFIG          _IOR(VHOST_VIRTIO, 0x73, \
+                                            struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_CONFIG          _IOW(VHOST_VIRTIO, 0x74, \
+                                            struct vhost_vdpa_config)
+/* Enable/disable the ring. */
+#define VHOST_VDPA_SET_VRING_ENABLE    _IOW(VHOST_VIRTIO, 0x75, \
+                                            struct vhost_vring_state)
+/* Get the max ring size. */
+#define VHOST_VDPA_GET_VRING_NUM       _IOR(VHOST_VIRTIO, 0x76, __u16)
+
 #endif
diff --git a/tools/include/vdso/bits.h b/tools/include/vdso/bits.h
new file mode 100644 (file)
index 0000000..6d005a1
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_BITS_H
+#define __VDSO_BITS_H
+
+#include <vdso/const.h>
+
+#define BIT(nr)                        (UL(1) << (nr))
+
+#endif /* __VDSO_BITS_H */
diff --git a/tools/include/vdso/const.h b/tools/include/vdso/const.h
new file mode 100644 (file)
index 0000000..94b385a
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CONST_H
+#define __VDSO_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x)          (_UL(x))
+#define ULL(x)         (_ULL(x))
+
+#endif /* __VDSO_CONST_H */
index ff9174282a8c6afdfeb79015a7ccbe0c7b33ac7b..8f480e29a6b07734edbb1f54f6fd7458d79b4220 100644 (file)
@@ -178,6 +178,8 @@ struct bpf_capabilities {
        __u32 array_mmap:1;
        /* BTF_FUNC_GLOBAL is supported */
        __u32 btf_func_global:1;
+       /* kernel support for expected_attach_type in BPF_PROG_LOAD */
+       __u32 exp_attach_type:1;
 };
 
 enum reloc_type {
@@ -194,6 +196,22 @@ struct reloc_desc {
        int sym_off;
 };
 
+struct bpf_sec_def;
+
+typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
+                                       struct bpf_program *prog);
+
+struct bpf_sec_def {
+       const char *sec;
+       size_t len;
+       enum bpf_prog_type prog_type;
+       enum bpf_attach_type expected_attach_type;
+       bool is_exp_attach_type_optional;
+       bool is_attachable;
+       bool is_attach_btf;
+       attach_fn_t attach_fn;
+};
+
 /*
  * bpf_prog should be a better name but it has been used in
  * linux/filter.h.
@@ -204,6 +222,7 @@ struct bpf_program {
        char *name;
        int prog_ifindex;
        char *section_name;
+       const struct bpf_sec_def *sec_def;
        /* section_name with / replaced by _; makes recursive pinning
         * in bpf_object__pin_programs easier
         */
@@ -3315,6 +3334,37 @@ static int bpf_object__probe_array_mmap(struct bpf_object *obj)
        return 0;
 }
 
+static int
+bpf_object__probe_exp_attach_type(struct bpf_object *obj)
+{
+       struct bpf_load_program_attr attr;
+       struct bpf_insn insns[] = {
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       };
+       int fd;
+
+       memset(&attr, 0, sizeof(attr));
+       /* use any valid combination of program type and (optional)
+        * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
+        * to see if kernel supports expected_attach_type field for
+        * BPF_PROG_LOAD command
+        */
+       attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
+       attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
+       attr.insns = insns;
+       attr.insns_cnt = ARRAY_SIZE(insns);
+       attr.license = "GPL";
+
+       fd = bpf_load_program_xattr(&attr, NULL, 0);
+       if (fd >= 0) {
+               obj->caps.exp_attach_type = 1;
+               close(fd);
+               return 1;
+       }
+       return 0;
+}
+
 static int
 bpf_object__probe_caps(struct bpf_object *obj)
 {
@@ -3325,6 +3375,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
                bpf_object__probe_btf_func_global,
                bpf_object__probe_btf_datasec,
                bpf_object__probe_array_mmap,
+               bpf_object__probe_exp_attach_type,
        };
        int i, ret;
 
@@ -4861,7 +4912,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 
        memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
        load_attr.prog_type = prog->type;
-       load_attr.expected_attach_type = prog->expected_attach_type;
+       /* old kernels might not support specifying expected_attach_type */
+       if (!prog->caps->exp_attach_type && prog->sec_def &&
+           prog->sec_def->is_exp_attach_type_optional)
+               load_attr.expected_attach_type = 0;
+       else
+               load_attr.expected_attach_type = prog->expected_attach_type;
        if (prog->caps->name)
                load_attr.name = prog->name;
        load_attr.insns = insns;
@@ -5062,6 +5118,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
        return 0;
 }
 
+static const struct bpf_sec_def *find_sec_def(const char *sec_name);
+
 static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
                   const struct bpf_object_open_opts *opts)
@@ -5117,24 +5175,17 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
        bpf_object__elf_finish(obj);
 
        bpf_object__for_each_program(prog, obj) {
-               enum bpf_prog_type prog_type;
-               enum bpf_attach_type attach_type;
-
-               if (prog->type != BPF_PROG_TYPE_UNSPEC)
-                       continue;
-
-               err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
-                                              &attach_type);
-               if (err == -ESRCH)
+               prog->sec_def = find_sec_def(prog->section_name);
+               if (!prog->sec_def)
                        /* couldn't guess, but user might manually specify */
                        continue;
-               if (err)
-                       goto out;
 
-               bpf_program__set_type(prog, prog_type);
-               bpf_program__set_expected_attach_type(prog, attach_type);
-               if (prog_type == BPF_PROG_TYPE_TRACING ||
-                   prog_type == BPF_PROG_TYPE_EXT)
+               bpf_program__set_type(prog, prog->sec_def->prog_type);
+               bpf_program__set_expected_attach_type(prog,
+                               prog->sec_def->expected_attach_type);
+
+               if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
+                   prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
                        prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
        }
 
@@ -6223,23 +6274,32 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
        prog->expected_attach_type = type;
 }
 
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
-       { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,          \
+                         attachable, attach_btf)                           \
+       {                                                                   \
+               .sec = string,                                              \
+               .len = sizeof(string) - 1,                                  \
+               .prog_type = ptype,                                         \
+               .expected_attach_type = eatype,                             \
+               .is_exp_attach_type_optional = eatype_optional,             \
+               .is_attachable = attachable,                                \
+               .is_attach_btf = attach_btf,                                \
+       }
 
 /* Programs that can NOT be attached. */
 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
 
 /* Programs that can be attached. */
 #define BPF_APROG_SEC(string, ptype, atype) \
-       BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
+       BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
 
 /* Programs that must specify expected attach type at load time. */
 #define BPF_EAPROG_SEC(string, ptype, eatype) \
-       BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
+       BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
 
 /* Programs that use BTF to identify attach point */
 #define BPF_PROG_BTF(string, ptype, eatype) \
-       BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
+       BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
 
 /* Programs that can be attached but attach type can't be identified by section
  * name. Kept for backward compatibility.
@@ -6253,11 +6313,6 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
        __VA_ARGS__                                                         \
 }
 
-struct bpf_sec_def;
-
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
-                                       struct bpf_program *prog);
-
 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
                                      struct bpf_program *prog);
 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
@@ -6269,17 +6324,6 @@ static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
                                   struct bpf_program *prog);
 
-struct bpf_sec_def {
-       const char *sec;
-       size_t len;
-       enum bpf_prog_type prog_type;
-       enum bpf_attach_type expected_attach_type;
-       bool is_attachable;
-       bool is_attach_btf;
-       enum bpf_attach_type attach_type;
-       attach_fn_t attach_fn;
-};
-
 static const struct bpf_sec_def section_defs[] = {
        BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
        BPF_PROG_SEC("sk_reuseport",            BPF_PROG_TYPE_SK_REUSEPORT),
@@ -6713,7 +6757,7 @@ int libbpf_attach_type_by_name(const char *name,
                        continue;
                if (!section_defs[i].is_attachable)
                        return -EINVAL;
-               *attach_type = section_defs[i].attach_type;
+               *attach_type = section_defs[i].expected_attach_type;
                return 0;
        }
        pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
@@ -7542,7 +7586,6 @@ static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
 struct bpf_link *
 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
 {
-       const struct bpf_sec_def *sec_def;
        enum bpf_attach_type attach_type;
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link *link;
@@ -7561,11 +7604,6 @@ bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
        link->detach = &bpf_link__detach_fd;
 
        attach_type = bpf_program__get_expected_attach_type(prog);
-       if (!attach_type) {
-               sec_def = find_sec_def(bpf_program__title(prog, false));
-               if (sec_def)
-                       attach_type = sec_def->attach_type;
-       }
        link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, NULL);
        if (link_fd < 0) {
                link_fd = -errno;
index 44df1d3e7287fbae5f84eb248ee9ca14e66e403c..f1dacecb1619fe02768f2ea94d2eed2b4e1367f6 100644 (file)
@@ -458,7 +458,7 @@ struct xdp_link_info {
 
 struct bpf_xdp_set_link_opts {
        size_t sz;
-       __u32 old_fd;
+       int old_fd;
 };
 #define bpf_xdp_set_link_opts__last_field old_fd
 
index 18b5319025e1986de84050d55b0e579d44173b48..0b709fd10bbaaa8dbc7e663422ba2887c0e9a0ce 100644 (file)
@@ -142,7 +142,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
                struct ifinfomsg ifinfo;
                char             attrbuf[64];
        } req;
-       __u32 nl_pid;
+       __u32 nl_pid = 0;
 
        sock = libbpf_netlink_open(&nl_pid);
        if (sock < 0)
@@ -288,7 +288,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
 {
        struct xdp_id_md xdp_id = {};
        int sock, ret;
-       __u32 nl_pid;
+       __u32 nl_pid = 0;
        __u32 mask;
 
        if (flags & ~XDP_FLAGS_MASK || !info_size)
@@ -321,7 +321,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
 
 static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
 {
-       if (info->attach_mode != XDP_ATTACHED_MULTI)
+       if (info->attach_mode != XDP_ATTACHED_MULTI && !flags)
                return info->prog_id;
        if (flags & XDP_FLAGS_DRV_MODE)
                return info->drv_prog_id;
index 8dd01f986fbb5867ce4b24ebd0950eca401a01b5..4b170fd08a28b194a6e3c006c9bcb1237dd6a246 100644 (file)
@@ -1050,10 +1050,7 @@ static struct rela *find_jump_table(struct objtool_file *file,
         * it.
         */
        for (;
-            &insn->list != &file->insn_list &&
-            insn->sec == func->sec &&
-            insn->offset >= func->offset;
-
+            &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
             insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
 
                if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
@@ -2008,8 +2005,8 @@ static int validate_return(struct symbol *func, struct instruction *insn, struct
        }
 
        if (state->bp_scratch) {
-               WARN("%s uses BP as a scratch register",
-                    func->name);
+               WARN_FUNC("BP used as a scratch register",
+                         insn->sec, insn->offset);
                return 1;
        }
 
@@ -2364,14 +2361,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
            !strcmp(insn->sec->name, ".altinstr_aux"))
                return true;
 
+       if (!insn->func)
+               return false;
+
+       /*
+        * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
+        * __builtin_unreachable().  The BUG() macro has an unreachable() after
+        * the UD2, which causes GCC's undefined trap logic to emit another UD2
+        * (or occasionally a JMP to UD2).
+        */
+       if (list_prev_entry(insn, list)->dead_end &&
+           (insn->type == INSN_BUG ||
+            (insn->type == INSN_JUMP_UNCONDITIONAL &&
+             insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+               return true;
+
        /*
         * Check if this (or a subsequent) instruction is related to
         * CONFIG_UBSAN or CONFIG_KASAN.
         *
         * End the search at 5 instructions to avoid going into the weeds.
         */
-       if (!insn->func)
-               return false;
        for (i = 0; i < 5; i++) {
 
                if (is_kasan_insn(insn) || is_ubsan_insn(insn))
index 13ccf775a83a462e66da0f15dae47471262419ba..ba4cbb1cdd632413aa7b4ca3603b6c2543674908 100644 (file)
@@ -66,7 +66,7 @@ int orc_dump(const char *_objname)
        char *name;
        size_t nr_sections;
        Elf64_Addr orc_ip_addr = 0;
-       size_t shstrtab_idx;
+       size_t shstrtab_idx, strtab_idx = 0;
        Elf *elf;
        Elf_Scn *scn;
        GElf_Shdr sh;
@@ -127,6 +127,8 @@ int orc_dump(const char *_objname)
 
                if (!strcmp(name, ".symtab")) {
                        symtab = data;
+               } else if (!strcmp(name, ".strtab")) {
+                       strtab_idx = i;
                } else if (!strcmp(name, ".orc_unwind")) {
                        orc = data->d_buf;
                        orc_size = sh.sh_size;
@@ -138,7 +140,7 @@ int orc_dump(const char *_objname)
                }
        }
 
-       if (!symtab || !orc || !orc_ip)
+       if (!symtab || !strtab_idx || !orc || !orc_ip)
                return 0;
 
        if (orc_size % sizeof(*orc) != 0) {
@@ -159,21 +161,29 @@ int orc_dump(const char *_objname)
                                return -1;
                        }
 
-                       scn = elf_getscn(elf, sym.st_shndx);
-                       if (!scn) {
-                               WARN_ELF("elf_getscn");
-                               return -1;
-                       }
-
-                       if (!gelf_getshdr(scn, &sh)) {
-                               WARN_ELF("gelf_getshdr");
-                               return -1;
-                       }
-
-                       name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
-                       if (!name || !*name) {
-                               WARN_ELF("elf_strptr");
-                               return -1;
+                       if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
+                               scn = elf_getscn(elf, sym.st_shndx);
+                               if (!scn) {
+                                       WARN_ELF("elf_getscn");
+                                       return -1;
+                               }
+
+                               if (!gelf_getshdr(scn, &sh)) {
+                                       WARN_ELF("gelf_getshdr");
+                                       return -1;
+                               }
+
+                               name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+                               if (!name) {
+                                       WARN_ELF("elf_strptr");
+                                       return -1;
+                               }
+                       } else {
+                               name = elf_strptr(elf, strtab_idx, sym.st_name);
+                               if (!name) {
+                                       WARN_ELF("elf_strptr");
+                                       return -1;
+                               }
                        }
 
                        printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
index 41e4a2754da4a2e2a96992f4df5e0c3a755c9562..4c0dabd280002771f8d28675088d230ed9b70e60 100644 (file)
@@ -88,11 +88,6 @@ static int create_orc_entry(struct elf *elf, struct section *u_sec, struct secti
        struct orc_entry *orc;
        struct rela *rela;
 
-       if (!insn_sec->sym) {
-               WARN("missing symbol for section %s", insn_sec->name);
-               return -1;
-       }
-
        /* populate ORC data */
        orc = (struct orc_entry *)u_sec->data->d_buf + idx;
        memcpy(orc, o, sizeof(*orc));
@@ -105,8 +100,32 @@ static int create_orc_entry(struct elf *elf, struct section *u_sec, struct secti
        }
        memset(rela, 0, sizeof(*rela));
 
-       rela->sym = insn_sec->sym;
-       rela->addend = insn_off;
+       if (insn_sec->sym) {
+               rela->sym = insn_sec->sym;
+               rela->addend = insn_off;
+       } else {
+               /*
+                * The Clang assembler doesn't produce section symbols, so we
+                * have to reference the function symbol instead:
+                */
+               rela->sym = find_symbol_containing(insn_sec, insn_off);
+               if (!rela->sym) {
+                       /*
+                        * Hack alert.  This happens when we need to reference
+                        * the NOP pad insn immediately after the function.
+                        */
+                       rela->sym = find_symbol_containing(insn_sec,
+                                                          insn_off - 1);
+               }
+               if (!rela->sym) {
+                       WARN("missing symbol for insn at offset 0x%lx\n",
+                            insn_off);
+                       return -1;
+               }
+
+               rela->addend = insn_off - rela->sym->offset;
+       }
+
        rela->type = R_X86_64_PC32;
        rela->offset = idx * sizeof(int);
        rela->sec = ip_relasec;
index 44d510bc9b7877a18c082ceb168f01e94db0417b..37b844f839bc4f4b07f292a26ee5d5987ccdcd1c 100644 (file)
 #
 # The abi is "common", "64" or "x32" for this file.
 #
-0      common  read                    __x64_sys_read
-1      common  write                   __x64_sys_write
-2      common  open                    __x64_sys_open
-3      common  close                   __x64_sys_close
-4      common  stat                    __x64_sys_newstat
-5      common  fstat                   __x64_sys_newfstat
-6      common  lstat                   __x64_sys_newlstat
-7      common  poll                    __x64_sys_poll
-8      common  lseek                   __x64_sys_lseek
-9      common  mmap                    __x64_sys_mmap
-10     common  mprotect                __x64_sys_mprotect
-11     common  munmap                  __x64_sys_munmap
-12     common  brk                     __x64_sys_brk
-13     64      rt_sigaction            __x64_sys_rt_sigaction
-14     common  rt_sigprocmask          __x64_sys_rt_sigprocmask
-15     64      rt_sigreturn            __x64_sys_rt_sigreturn/ptregs
-16     64      ioctl                   __x64_sys_ioctl
-17     common  pread64                 __x64_sys_pread64
-18     common  pwrite64                __x64_sys_pwrite64
-19     64      readv                   __x64_sys_readv
-20     64      writev                  __x64_sys_writev
-21     common  access                  __x64_sys_access
-22     common  pipe                    __x64_sys_pipe
-23     common  select                  __x64_sys_select
-24     common  sched_yield             __x64_sys_sched_yield
-25     common  mremap                  __x64_sys_mremap
-26     common  msync                   __x64_sys_msync
-27     common  mincore                 __x64_sys_mincore
-28     common  madvise                 __x64_sys_madvise
-29     common  shmget                  __x64_sys_shmget
-30     common  shmat                   __x64_sys_shmat
-31     common  shmctl                  __x64_sys_shmctl
-32     common  dup                     __x64_sys_dup
-33     common  dup2                    __x64_sys_dup2
-34     common  pause                   __x64_sys_pause
-35     common  nanosleep               __x64_sys_nanosleep
-36     common  getitimer               __x64_sys_getitimer
-37     common  alarm                   __x64_sys_alarm
-38     common  setitimer               __x64_sys_setitimer
-39     common  getpid                  __x64_sys_getpid
-40     common  sendfile                __x64_sys_sendfile64
-41     common  socket                  __x64_sys_socket
-42     common  connect                 __x64_sys_connect
-43     common  accept                  __x64_sys_accept
-44     common  sendto                  __x64_sys_sendto
-45     64      recvfrom                __x64_sys_recvfrom
-46     64      sendmsg                 __x64_sys_sendmsg
-47     64      recvmsg                 __x64_sys_recvmsg
-48     common  shutdown                __x64_sys_shutdown
-49     common  bind                    __x64_sys_bind
-50     common  listen                  __x64_sys_listen
-51     common  getsockname             __x64_sys_getsockname
-52     common  getpeername             __x64_sys_getpeername
-53     common  socketpair              __x64_sys_socketpair
-54     64      setsockopt              __x64_sys_setsockopt
-55     64      getsockopt              __x64_sys_getsockopt
-56     common  clone                   __x64_sys_clone/ptregs
-57     common  fork                    __x64_sys_fork/ptregs
-58     common  vfork                   __x64_sys_vfork/ptregs
-59     64      execve                  __x64_sys_execve/ptregs
-60     common  exit                    __x64_sys_exit
-61     common  wait4                   __x64_sys_wait4
-62     common  kill                    __x64_sys_kill
-63     common  uname                   __x64_sys_newuname
-64     common  semget                  __x64_sys_semget
-65     common  semop                   __x64_sys_semop
-66     common  semctl                  __x64_sys_semctl
-67     common  shmdt                   __x64_sys_shmdt
-68     common  msgget                  __x64_sys_msgget
-69     common  msgsnd                  __x64_sys_msgsnd
-70     common  msgrcv                  __x64_sys_msgrcv
-71     common  msgctl                  __x64_sys_msgctl
-72     common  fcntl                   __x64_sys_fcntl
-73     common  flock                   __x64_sys_flock
-74     common  fsync                   __x64_sys_fsync
-75     common  fdatasync               __x64_sys_fdatasync
-76     common  truncate                __x64_sys_truncate
-77     common  ftruncate               __x64_sys_ftruncate
-78     common  getdents                __x64_sys_getdents
-79     common  getcwd                  __x64_sys_getcwd
-80     common  chdir                   __x64_sys_chdir
-81     common  fchdir                  __x64_sys_fchdir
-82     common  rename                  __x64_sys_rename
-83     common  mkdir                   __x64_sys_mkdir
-84     common  rmdir                   __x64_sys_rmdir
-85     common  creat                   __x64_sys_creat
-86     common  link                    __x64_sys_link
-87     common  unlink                  __x64_sys_unlink
-88     common  symlink                 __x64_sys_symlink
-89     common  readlink                __x64_sys_readlink
-90     common  chmod                   __x64_sys_chmod
-91     common  fchmod                  __x64_sys_fchmod
-92     common  chown                   __x64_sys_chown
-93     common  fchown                  __x64_sys_fchown
-94     common  lchown                  __x64_sys_lchown
-95     common  umask                   __x64_sys_umask
-96     common  gettimeofday            __x64_sys_gettimeofday
-97     common  getrlimit               __x64_sys_getrlimit
-98     common  getrusage               __x64_sys_getrusage
-99     common  sysinfo                 __x64_sys_sysinfo
-100    common  times                   __x64_sys_times
-101    64      ptrace                  __x64_sys_ptrace
-102    common  getuid                  __x64_sys_getuid
-103    common  syslog                  __x64_sys_syslog
-104    common  getgid                  __x64_sys_getgid
-105    common  setuid                  __x64_sys_setuid
-106    common  setgid                  __x64_sys_setgid
-107    common  geteuid                 __x64_sys_geteuid
-108    common  getegid                 __x64_sys_getegid
-109    common  setpgid                 __x64_sys_setpgid
-110    common  getppid                 __x64_sys_getppid
-111    common  getpgrp                 __x64_sys_getpgrp
-112    common  setsid                  __x64_sys_setsid
-113    common  setreuid                __x64_sys_setreuid
-114    common  setregid                __x64_sys_setregid
-115    common  getgroups               __x64_sys_getgroups
-116    common  setgroups               __x64_sys_setgroups
-117    common  setresuid               __x64_sys_setresuid
-118    common  getresuid               __x64_sys_getresuid
-119    common  setresgid               __x64_sys_setresgid
-120    common  getresgid               __x64_sys_getresgid
-121    common  getpgid                 __x64_sys_getpgid
-122    common  setfsuid                __x64_sys_setfsuid
-123    common  setfsgid                __x64_sys_setfsgid
-124    common  getsid                  __x64_sys_getsid
-125    common  capget                  __x64_sys_capget
-126    common  capset                  __x64_sys_capset
-127    64      rt_sigpending           __x64_sys_rt_sigpending
-128    64      rt_sigtimedwait         __x64_sys_rt_sigtimedwait
-129    64      rt_sigqueueinfo         __x64_sys_rt_sigqueueinfo
-130    common  rt_sigsuspend           __x64_sys_rt_sigsuspend
-131    64      sigaltstack             __x64_sys_sigaltstack
-132    common  utime                   __x64_sys_utime
-133    common  mknod                   __x64_sys_mknod
+0      common  read                    sys_read
+1      common  write                   sys_write
+2      common  open                    sys_open
+3      common  close                   sys_close
+4      common  stat                    sys_newstat
+5      common  fstat                   sys_newfstat
+6      common  lstat                   sys_newlstat
+7      common  poll                    sys_poll
+8      common  lseek                   sys_lseek
+9      common  mmap                    sys_mmap
+10     common  mprotect                sys_mprotect
+11     common  munmap                  sys_munmap
+12     common  brk                     sys_brk
+13     64      rt_sigaction            sys_rt_sigaction
+14     common  rt_sigprocmask          sys_rt_sigprocmask
+15     64      rt_sigreturn            sys_rt_sigreturn
+16     64      ioctl                   sys_ioctl
+17     common  pread64                 sys_pread64
+18     common  pwrite64                sys_pwrite64
+19     64      readv                   sys_readv
+20     64      writev                  sys_writev
+21     common  access                  sys_access
+22     common  pipe                    sys_pipe
+23     common  select                  sys_select
+24     common  sched_yield             sys_sched_yield
+25     common  mremap                  sys_mremap
+26     common  msync                   sys_msync
+27     common  mincore                 sys_mincore
+28     common  madvise                 sys_madvise
+29     common  shmget                  sys_shmget
+30     common  shmat                   sys_shmat
+31     common  shmctl                  sys_shmctl
+32     common  dup                     sys_dup
+33     common  dup2                    sys_dup2
+34     common  pause                   sys_pause
+35     common  nanosleep               sys_nanosleep
+36     common  getitimer               sys_getitimer
+37     common  alarm                   sys_alarm
+38     common  setitimer               sys_setitimer
+39     common  getpid                  sys_getpid
+40     common  sendfile                sys_sendfile64
+41     common  socket                  sys_socket
+42     common  connect                 sys_connect
+43     common  accept                  sys_accept
+44     common  sendto                  sys_sendto
+45     64      recvfrom                sys_recvfrom
+46     64      sendmsg                 sys_sendmsg
+47     64      recvmsg                 sys_recvmsg
+48     common  shutdown                sys_shutdown
+49     common  bind                    sys_bind
+50     common  listen                  sys_listen
+51     common  getsockname             sys_getsockname
+52     common  getpeername             sys_getpeername
+53     common  socketpair              sys_socketpair
+54     64      setsockopt              sys_setsockopt
+55     64      getsockopt              sys_getsockopt
+56     common  clone                   sys_clone
+57     common  fork                    sys_fork
+58     common  vfork                   sys_vfork
+59     64      execve                  sys_execve
+60     common  exit                    sys_exit
+61     common  wait4                   sys_wait4
+62     common  kill                    sys_kill
+63     common  uname                   sys_newuname
+64     common  semget                  sys_semget
+65     common  semop                   sys_semop
+66     common  semctl                  sys_semctl
+67     common  shmdt                   sys_shmdt
+68     common  msgget                  sys_msgget
+69     common  msgsnd                  sys_msgsnd
+70     common  msgrcv                  sys_msgrcv
+71     common  msgctl                  sys_msgctl
+72     common  fcntl                   sys_fcntl
+73     common  flock                   sys_flock
+74     common  fsync                   sys_fsync
+75     common  fdatasync               sys_fdatasync
+76     common  truncate                sys_truncate
+77     common  ftruncate               sys_ftruncate
+78     common  getdents                sys_getdents
+79     common  getcwd                  sys_getcwd
+80     common  chdir                   sys_chdir
+81     common  fchdir                  sys_fchdir
+82     common  rename                  sys_rename
+83     common  mkdir                   sys_mkdir
+84     common  rmdir                   sys_rmdir
+85     common  creat                   sys_creat
+86     common  link                    sys_link
+87     common  unlink                  sys_unlink
+88     common  symlink                 sys_symlink
+89     common  readlink                sys_readlink
+90     common  chmod                   sys_chmod
+91     common  fchmod                  sys_fchmod
+92     common  chown                   sys_chown
+93     common  fchown                  sys_fchown
+94     common  lchown                  sys_lchown
+95     common  umask                   sys_umask
+96     common  gettimeofday            sys_gettimeofday
+97     common  getrlimit               sys_getrlimit
+98     common  getrusage               sys_getrusage
+99     common  sysinfo                 sys_sysinfo
+100    common  times                   sys_times
+101    64      ptrace                  sys_ptrace
+102    common  getuid                  sys_getuid
+103    common  syslog                  sys_syslog
+104    common  getgid                  sys_getgid
+105    common  setuid                  sys_setuid
+106    common  setgid                  sys_setgid
+107    common  geteuid                 sys_geteuid
+108    common  getegid                 sys_getegid
+109    common  setpgid                 sys_setpgid
+110    common  getppid                 sys_getppid
+111    common  getpgrp                 sys_getpgrp
+112    common  setsid                  sys_setsid
+113    common  setreuid                sys_setreuid
+114    common  setregid                sys_setregid
+115    common  getgroups               sys_getgroups
+116    common  setgroups               sys_setgroups
+117    common  setresuid               sys_setresuid
+118    common  getresuid               sys_getresuid
+119    common  setresgid               sys_setresgid
+120    common  getresgid               sys_getresgid
+121    common  getpgid                 sys_getpgid
+122    common  setfsuid                sys_setfsuid
+123    common  setfsgid                sys_setfsgid
+124    common  getsid                  sys_getsid
+125    common  capget                  sys_capget
+126    common  capset                  sys_capset
+127    64      rt_sigpending           sys_rt_sigpending
+128    64      rt_sigtimedwait         sys_rt_sigtimedwait
+129    64      rt_sigqueueinfo         sys_rt_sigqueueinfo
+130    common  rt_sigsuspend           sys_rt_sigsuspend
+131    64      sigaltstack             sys_sigaltstack
+132    common  utime                   sys_utime
+133    common  mknod                   sys_mknod
 134    64      uselib
-135    common  personality             __x64_sys_personality
-136    common  ustat                   __x64_sys_ustat
-137    common  statfs                  __x64_sys_statfs
-138    common  fstatfs                 __x64_sys_fstatfs
-139    common  sysfs                   __x64_sys_sysfs
-140    common  getpriority             __x64_sys_getpriority
-141    common  setpriority             __x64_sys_setpriority
-142    common  sched_setparam          __x64_sys_sched_setparam
-143    common  sched_getparam          __x64_sys_sched_getparam
-144    common  sched_setscheduler      __x64_sys_sched_setscheduler
-145    common  sched_getscheduler      __x64_sys_sched_getscheduler
-146    common  sched_get_priority_max  __x64_sys_sched_get_priority_max
-147    common  sched_get_priority_min  __x64_sys_sched_get_priority_min
-148    common  sched_rr_get_interval   __x64_sys_sched_rr_get_interval
-149    common  mlock                   __x64_sys_mlock
-150    common  munlock                 __x64_sys_munlock
-151    common  mlockall                __x64_sys_mlockall
-152    common  munlockall              __x64_sys_munlockall
-153    common  vhangup                 __x64_sys_vhangup
-154    common  modify_ldt              __x64_sys_modify_ldt
-155    common  pivot_root              __x64_sys_pivot_root
-156    64      _sysctl                 __x64_sys_sysctl
-157    common  prctl                   __x64_sys_prctl
-158    common  arch_prctl              __x64_sys_arch_prctl
-159    common  adjtimex                __x64_sys_adjtimex
-160    common  setrlimit               __x64_sys_setrlimit
-161    common  chroot                  __x64_sys_chroot
-162    common  sync                    __x64_sys_sync
-163    common  acct                    __x64_sys_acct
-164    common  settimeofday            __x64_sys_settimeofday
-165    common  mount                   __x64_sys_mount
-166    common  umount2                 __x64_sys_umount
-167    common  swapon                  __x64_sys_swapon
-168    common  swapoff                 __x64_sys_swapoff
-169    common  reboot                  __x64_sys_reboot
-170    common  sethostname             __x64_sys_sethostname
-171    common  setdomainname           __x64_sys_setdomainname
-172    common  iopl                    __x64_sys_iopl/ptregs
-173    common  ioperm                  __x64_sys_ioperm
+135    common  personality             sys_personality
+136    common  ustat                   sys_ustat
+137    common  statfs                  sys_statfs
+138    common  fstatfs                 sys_fstatfs
+139    common  sysfs                   sys_sysfs
+140    common  getpriority             sys_getpriority
+141    common  setpriority             sys_setpriority
+142    common  sched_setparam          sys_sched_setparam
+143    common  sched_getparam          sys_sched_getparam
+144    common  sched_setscheduler      sys_sched_setscheduler
+145    common  sched_getscheduler      sys_sched_getscheduler
+146    common  sched_get_priority_max  sys_sched_get_priority_max
+147    common  sched_get_priority_min  sys_sched_get_priority_min
+148    common  sched_rr_get_interval   sys_sched_rr_get_interval
+149    common  mlock                   sys_mlock
+150    common  munlock                 sys_munlock
+151    common  mlockall                sys_mlockall
+152    common  munlockall              sys_munlockall
+153    common  vhangup                 sys_vhangup
+154    common  modify_ldt              sys_modify_ldt
+155    common  pivot_root              sys_pivot_root
+156    64      _sysctl                 sys_sysctl
+157    common  prctl                   sys_prctl
+158    common  arch_prctl              sys_arch_prctl
+159    common  adjtimex                sys_adjtimex
+160    common  setrlimit               sys_setrlimit
+161    common  chroot                  sys_chroot
+162    common  sync                    sys_sync
+163    common  acct                    sys_acct
+164    common  settimeofday            sys_settimeofday
+165    common  mount                   sys_mount
+166    common  umount2                 sys_umount
+167    common  swapon                  sys_swapon
+168    common  swapoff                 sys_swapoff
+169    common  reboot                  sys_reboot
+170    common  sethostname             sys_sethostname
+171    common  setdomainname           sys_setdomainname
+172    common  iopl                    sys_iopl
+173    common  ioperm                  sys_ioperm
 174    64      create_module
-175    common  init_module             __x64_sys_init_module
-176    common  delete_module           __x64_sys_delete_module
+175    common  init_module             sys_init_module
+176    common  delete_module           sys_delete_module
 177    64      get_kernel_syms
 178    64      query_module
-179    common  quotactl                __x64_sys_quotactl
+179    common  quotactl                sys_quotactl
 180    64      nfsservctl
 181    common  getpmsg
 182    common  putpmsg
 183    common  afs_syscall
 184    common  tuxcall
 185    common  security
-186    common  gettid                  __x64_sys_gettid
-187    common  readahead               __x64_sys_readahead
-188    common  setxattr                __x64_sys_setxattr
-189    common  lsetxattr               __x64_sys_lsetxattr
-190    common  fsetxattr               __x64_sys_fsetxattr
-191    common  getxattr                __x64_sys_getxattr
-192    common  lgetxattr               __x64_sys_lgetxattr
-193    common  fgetxattr               __x64_sys_fgetxattr
-194    common  listxattr               __x64_sys_listxattr
-195    common  llistxattr              __x64_sys_llistxattr
-196    common  flistxattr              __x64_sys_flistxattr
-197    common  removexattr             __x64_sys_removexattr
-198    common  lremovexattr            __x64_sys_lremovexattr
-199    common  fremovexattr            __x64_sys_fremovexattr
-200    common  tkill                   __x64_sys_tkill
-201    common  time                    __x64_sys_time
-202    common  futex                   __x64_sys_futex
-203    common  sched_setaffinity       __x64_sys_sched_setaffinity
-204    common  sched_getaffinity       __x64_sys_sched_getaffinity
+186    common  gettid                  sys_gettid
+187    common  readahead               sys_readahead
+188    common  setxattr                sys_setxattr
+189    common  lsetxattr               sys_lsetxattr
+190    common  fsetxattr               sys_fsetxattr
+191    common  getxattr                sys_getxattr
+192    common  lgetxattr               sys_lgetxattr
+193    common  fgetxattr               sys_fgetxattr
+194    common  listxattr               sys_listxattr
+195    common  llistxattr              sys_llistxattr
+196    common  flistxattr              sys_flistxattr
+197    common  removexattr             sys_removexattr
+198    common  lremovexattr            sys_lremovexattr
+199    common  fremovexattr            sys_fremovexattr
+200    common  tkill                   sys_tkill
+201    common  time                    sys_time
+202    common  futex                   sys_futex
+203    common  sched_setaffinity       sys_sched_setaffinity
+204    common  sched_getaffinity       sys_sched_getaffinity
 205    64      set_thread_area
-206    64      io_setup                __x64_sys_io_setup
-207    common  io_destroy              __x64_sys_io_destroy
-208    common  io_getevents            __x64_sys_io_getevents
-209    64      io_submit               __x64_sys_io_submit
-210    common  io_cancel               __x64_sys_io_cancel
+206    64      io_setup                sys_io_setup
+207    common  io_destroy              sys_io_destroy
+208    common  io_getevents            sys_io_getevents
+209    64      io_submit               sys_io_submit
+210    common  io_cancel               sys_io_cancel
 211    64      get_thread_area
-212    common  lookup_dcookie          __x64_sys_lookup_dcookie
-213    common  epoll_create            __x64_sys_epoll_create
+212    common  lookup_dcookie          sys_lookup_dcookie
+213    common  epoll_create            sys_epoll_create
 214    64      epoll_ctl_old
 215    64      epoll_wait_old
-216    common  remap_file_pages        __x64_sys_remap_file_pages
-217    common  getdents64              __x64_sys_getdents64
-218    common  set_tid_address         __x64_sys_set_tid_address
-219    common  restart_syscall         __x64_sys_restart_syscall
-220    common  semtimedop              __x64_sys_semtimedop
-221    common  fadvise64               __x64_sys_fadvise64
-222    64      timer_create            __x64_sys_timer_create
-223    common  timer_settime           __x64_sys_timer_settime
-224    common  timer_gettime           __x64_sys_timer_gettime
-225    common  timer_getoverrun        __x64_sys_timer_getoverrun
-226    common  timer_delete            __x64_sys_timer_delete
-227    common  clock_settime           __x64_sys_clock_settime
-228    common  clock_gettime           __x64_sys_clock_gettime
-229    common  clock_getres            __x64_sys_clock_getres
-230    common  clock_nanosleep         __x64_sys_clock_nanosleep
-231    common  exit_group              __x64_sys_exit_group
-232    common  epoll_wait              __x64_sys_epoll_wait
-233    common  epoll_ctl               __x64_sys_epoll_ctl
-234    common  tgkill                  __x64_sys_tgkill
-235    common  utimes                  __x64_sys_utimes
+216    common  remap_file_pages        sys_remap_file_pages
+217    common  getdents64              sys_getdents64
+218    common  set_tid_address         sys_set_tid_address
+219    common  restart_syscall         sys_restart_syscall
+220    common  semtimedop              sys_semtimedop
+221    common  fadvise64               sys_fadvise64
+222    64      timer_create            sys_timer_create
+223    common  timer_settime           sys_timer_settime
+224    common  timer_gettime           sys_timer_gettime
+225    common  timer_getoverrun        sys_timer_getoverrun
+226    common  timer_delete            sys_timer_delete
+227    common  clock_settime           sys_clock_settime
+228    common  clock_gettime           sys_clock_gettime
+229    common  clock_getres            sys_clock_getres
+230    common  clock_nanosleep         sys_clock_nanosleep
+231    common  exit_group              sys_exit_group
+232    common  epoll_wait              sys_epoll_wait
+233    common  epoll_ctl               sys_epoll_ctl
+234    common  tgkill                  sys_tgkill
+235    common  utimes                  sys_utimes
 236    64      vserver
-237    common  mbind                   __x64_sys_mbind
-238    common  set_mempolicy           __x64_sys_set_mempolicy
-239    common  get_mempolicy           __x64_sys_get_mempolicy
-240    common  mq_open                 __x64_sys_mq_open
-241    common  mq_unlink               __x64_sys_mq_unlink
-242    common  mq_timedsend            __x64_sys_mq_timedsend
-243    common  mq_timedreceive         __x64_sys_mq_timedreceive
-244    64      mq_notify               __x64_sys_mq_notify
-245    common  mq_getsetattr           __x64_sys_mq_getsetattr
-246    64      kexec_load              __x64_sys_kexec_load
-247    64      waitid                  __x64_sys_waitid
-248    common  add_key                 __x64_sys_add_key
-249    common  request_key             __x64_sys_request_key
-250    common  keyctl                  __x64_sys_keyctl
-251    common  ioprio_set              __x64_sys_ioprio_set
-252    common  ioprio_get              __x64_sys_ioprio_get
-253    common  inotify_init            __x64_sys_inotify_init
-254    common  inotify_add_watch       __x64_sys_inotify_add_watch
-255    common  inotify_rm_watch        __x64_sys_inotify_rm_watch
-256    common  migrate_pages           __x64_sys_migrate_pages
-257    common  openat                  __x64_sys_openat
-258    common  mkdirat                 __x64_sys_mkdirat
-259    common  mknodat                 __x64_sys_mknodat
-260    common  fchownat                __x64_sys_fchownat
-261    common  futimesat               __x64_sys_futimesat
-262    common  newfstatat              __x64_sys_newfstatat
-263    common  unlinkat                __x64_sys_unlinkat
-264    common  renameat                __x64_sys_renameat
-265    common  linkat                  __x64_sys_linkat
-266    common  symlinkat               __x64_sys_symlinkat
-267    common  readlinkat              __x64_sys_readlinkat
-268    common  fchmodat                __x64_sys_fchmodat
-269    common  faccessat               __x64_sys_faccessat
-270    common  pselect6                __x64_sys_pselect6
-271    common  ppoll                   __x64_sys_ppoll
-272    common  unshare                 __x64_sys_unshare
-273    64      set_robust_list         __x64_sys_set_robust_list
-274    64      get_robust_list         __x64_sys_get_robust_list
-275    common  splice                  __x64_sys_splice
-276    common  tee                     __x64_sys_tee
-277    common  sync_file_range         __x64_sys_sync_file_range
-278    64      vmsplice                __x64_sys_vmsplice
-279    64      move_pages              __x64_sys_move_pages
-280    common  utimensat               __x64_sys_utimensat
-281    common  epoll_pwait             __x64_sys_epoll_pwait
-282    common  signalfd                __x64_sys_signalfd
-283    common  timerfd_create          __x64_sys_timerfd_create
-284    common  eventfd                 __x64_sys_eventfd
-285    common  fallocate               __x64_sys_fallocate
-286    common  timerfd_settime         __x64_sys_timerfd_settime
-287    common  timerfd_gettime         __x64_sys_timerfd_gettime
-288    common  accept4                 __x64_sys_accept4
-289    common  signalfd4               __x64_sys_signalfd4
-290    common  eventfd2                __x64_sys_eventfd2
-291    common  epoll_create1           __x64_sys_epoll_create1
-292    common  dup3                    __x64_sys_dup3
-293    common  pipe2                   __x64_sys_pipe2
-294    common  inotify_init1           __x64_sys_inotify_init1
-295    64      preadv                  __x64_sys_preadv
-296    64      pwritev                 __x64_sys_pwritev
-297    64      rt_tgsigqueueinfo       __x64_sys_rt_tgsigqueueinfo
-298    common  perf_event_open         __x64_sys_perf_event_open
-299    64      recvmmsg                __x64_sys_recvmmsg
-300    common  fanotify_init           __x64_sys_fanotify_init
-301    common  fanotify_mark           __x64_sys_fanotify_mark
-302    common  prlimit64               __x64_sys_prlimit64
-303    common  name_to_handle_at       __x64_sys_name_to_handle_at
-304    common  open_by_handle_at       __x64_sys_open_by_handle_at
-305    common  clock_adjtime           __x64_sys_clock_adjtime
-306    common  syncfs                  __x64_sys_syncfs
-307    64      sendmmsg                __x64_sys_sendmmsg
-308    common  setns                   __x64_sys_setns
-309    common  getcpu                  __x64_sys_getcpu
-310    64      process_vm_readv        __x64_sys_process_vm_readv
-311    64      process_vm_writev       __x64_sys_process_vm_writev
-312    common  kcmp                    __x64_sys_kcmp
-313    common  finit_module            __x64_sys_finit_module
-314    common  sched_setattr           __x64_sys_sched_setattr
-315    common  sched_getattr           __x64_sys_sched_getattr
-316    common  renameat2               __x64_sys_renameat2
-317    common  seccomp                 __x64_sys_seccomp
-318    common  getrandom               __x64_sys_getrandom
-319    common  memfd_create            __x64_sys_memfd_create
-320    common  kexec_file_load         __x64_sys_kexec_file_load
-321    common  bpf                     __x64_sys_bpf
-322    64      execveat                __x64_sys_execveat/ptregs
-323    common  userfaultfd             __x64_sys_userfaultfd
-324    common  membarrier              __x64_sys_membarrier
-325    common  mlock2                  __x64_sys_mlock2
-326    common  copy_file_range         __x64_sys_copy_file_range
-327    64      preadv2                 __x64_sys_preadv2
-328    64      pwritev2                __x64_sys_pwritev2
-329    common  pkey_mprotect           __x64_sys_pkey_mprotect
-330    common  pkey_alloc              __x64_sys_pkey_alloc
-331    common  pkey_free               __x64_sys_pkey_free
-332    common  statx                   __x64_sys_statx
-333    common  io_pgetevents           __x64_sys_io_pgetevents
-334    common  rseq                    __x64_sys_rseq
+237    common  mbind                   sys_mbind
+238    common  set_mempolicy           sys_set_mempolicy
+239    common  get_mempolicy           sys_get_mempolicy
+240    common  mq_open                 sys_mq_open
+241    common  mq_unlink               sys_mq_unlink
+242    common  mq_timedsend            sys_mq_timedsend
+243    common  mq_timedreceive         sys_mq_timedreceive
+244    64      mq_notify               sys_mq_notify
+245    common  mq_getsetattr           sys_mq_getsetattr
+246    64      kexec_load              sys_kexec_load
+247    64      waitid                  sys_waitid
+248    common  add_key                 sys_add_key
+249    common  request_key             sys_request_key
+250    common  keyctl                  sys_keyctl
+251    common  ioprio_set              sys_ioprio_set
+252    common  ioprio_get              sys_ioprio_get
+253    common  inotify_init            sys_inotify_init
+254    common  inotify_add_watch       sys_inotify_add_watch
+255    common  inotify_rm_watch        sys_inotify_rm_watch
+256    common  migrate_pages           sys_migrate_pages
+257    common  openat                  sys_openat
+258    common  mkdirat                 sys_mkdirat
+259    common  mknodat                 sys_mknodat
+260    common  fchownat                sys_fchownat
+261    common  futimesat               sys_futimesat
+262    common  newfstatat              sys_newfstatat
+263    common  unlinkat                sys_unlinkat
+264    common  renameat                sys_renameat
+265    common  linkat                  sys_linkat
+266    common  symlinkat               sys_symlinkat
+267    common  readlinkat              sys_readlinkat
+268    common  fchmodat                sys_fchmodat
+269    common  faccessat               sys_faccessat
+270    common  pselect6                sys_pselect6
+271    common  ppoll                   sys_ppoll
+272    common  unshare                 sys_unshare
+273    64      set_robust_list         sys_set_robust_list
+274    64      get_robust_list         sys_get_robust_list
+275    common  splice                  sys_splice
+276    common  tee                     sys_tee
+277    common  sync_file_range         sys_sync_file_range
+278    64      vmsplice                sys_vmsplice
+279    64      move_pages              sys_move_pages
+280    common  utimensat               sys_utimensat
+281    common  epoll_pwait             sys_epoll_pwait
+282    common  signalfd                sys_signalfd
+283    common  timerfd_create          sys_timerfd_create
+284    common  eventfd                 sys_eventfd
+285    common  fallocate               sys_fallocate
+286    common  timerfd_settime         sys_timerfd_settime
+287    common  timerfd_gettime         sys_timerfd_gettime
+288    common  accept4                 sys_accept4
+289    common  signalfd4               sys_signalfd4
+290    common  eventfd2                sys_eventfd2
+291    common  epoll_create1           sys_epoll_create1
+292    common  dup3                    sys_dup3
+293    common  pipe2                   sys_pipe2
+294    common  inotify_init1           sys_inotify_init1
+295    64      preadv                  sys_preadv
+296    64      pwritev                 sys_pwritev
+297    64      rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo
+298    common  perf_event_open         sys_perf_event_open
+299    64      recvmmsg                sys_recvmmsg
+300    common  fanotify_init           sys_fanotify_init
+301    common  fanotify_mark           sys_fanotify_mark
+302    common  prlimit64               sys_prlimit64
+303    common  name_to_handle_at       sys_name_to_handle_at
+304    common  open_by_handle_at       sys_open_by_handle_at
+305    common  clock_adjtime           sys_clock_adjtime
+306    common  syncfs                  sys_syncfs
+307    64      sendmmsg                sys_sendmmsg
+308    common  setns                   sys_setns
+309    common  getcpu                  sys_getcpu
+310    64      process_vm_readv        sys_process_vm_readv
+311    64      process_vm_writev       sys_process_vm_writev
+312    common  kcmp                    sys_kcmp
+313    common  finit_module            sys_finit_module
+314    common  sched_setattr           sys_sched_setattr
+315    common  sched_getattr           sys_sched_getattr
+316    common  renameat2               sys_renameat2
+317    common  seccomp                 sys_seccomp
+318    common  getrandom               sys_getrandom
+319    common  memfd_create            sys_memfd_create
+320    common  kexec_file_load         sys_kexec_file_load
+321    common  bpf                     sys_bpf
+322    64      execveat                sys_execveat
+323    common  userfaultfd             sys_userfaultfd
+324    common  membarrier              sys_membarrier
+325    common  mlock2                  sys_mlock2
+326    common  copy_file_range         sys_copy_file_range
+327    64      preadv2                 sys_preadv2
+328    64      pwritev2                sys_pwritev2
+329    common  pkey_mprotect           sys_pkey_mprotect
+330    common  pkey_alloc              sys_pkey_alloc
+331    common  pkey_free               sys_pkey_free
+332    common  statx                   sys_statx
+333    common  io_pgetevents           sys_io_pgetevents
+334    common  rseq                    sys_rseq
 # don't use numbers 387 through 423, add new calls after the last
 # 'common' entry
-424    common  pidfd_send_signal       __x64_sys_pidfd_send_signal
-425    common  io_uring_setup          __x64_sys_io_uring_setup
-426    common  io_uring_enter          __x64_sys_io_uring_enter
-427    common  io_uring_register       __x64_sys_io_uring_register
-428    common  open_tree               __x64_sys_open_tree
-429    common  move_mount              __x64_sys_move_mount
-430    common  fsopen                  __x64_sys_fsopen
-431    common  fsconfig                __x64_sys_fsconfig
-432    common  fsmount                 __x64_sys_fsmount
-433    common  fspick                  __x64_sys_fspick
-434    common  pidfd_open              __x64_sys_pidfd_open
-435    common  clone3                  __x64_sys_clone3/ptregs
-437    common  openat2                 __x64_sys_openat2
-438    common  pidfd_getfd             __x64_sys_pidfd_getfd
+424    common  pidfd_send_signal       sys_pidfd_send_signal
+425    common  io_uring_setup          sys_io_uring_setup
+426    common  io_uring_enter          sys_io_uring_enter
+427    common  io_uring_register       sys_io_uring_register
+428    common  open_tree               sys_open_tree
+429    common  move_mount              sys_move_mount
+430    common  fsopen                  sys_fsopen
+431    common  fsconfig                sys_fsconfig
+432    common  fsmount                 sys_fsmount
+433    common  fspick                  sys_fspick
+434    common  pidfd_open              sys_pidfd_open
+435    common  clone3                  sys_clone3
+437    common  openat2                 sys_openat2
+438    common  pidfd_getfd             sys_pidfd_getfd
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 # on-the-fly for compat_sys_*() compatibility system calls if X86_X32
 # is defined.
 #
-512    x32     rt_sigaction            __x32_compat_sys_rt_sigaction
-513    x32     rt_sigreturn            sys32_x32_rt_sigreturn
-514    x32     ioctl                   __x32_compat_sys_ioctl
-515    x32     readv                   __x32_compat_sys_readv
-516    x32     writev                  __x32_compat_sys_writev
-517    x32     recvfrom                __x32_compat_sys_recvfrom
-518    x32     sendmsg                 __x32_compat_sys_sendmsg
-519    x32     recvmsg                 __x32_compat_sys_recvmsg
-520    x32     execve                  __x32_compat_sys_execve/ptregs
-521    x32     ptrace                  __x32_compat_sys_ptrace
-522    x32     rt_sigpending           __x32_compat_sys_rt_sigpending
-523    x32     rt_sigtimedwait         __x32_compat_sys_rt_sigtimedwait_time64
-524    x32     rt_sigqueueinfo         __x32_compat_sys_rt_sigqueueinfo
-525    x32     sigaltstack             __x32_compat_sys_sigaltstack
-526    x32     timer_create            __x32_compat_sys_timer_create
-527    x32     mq_notify               __x32_compat_sys_mq_notify
-528    x32     kexec_load              __x32_compat_sys_kexec_load
-529    x32     waitid                  __x32_compat_sys_waitid
-530    x32     set_robust_list         __x32_compat_sys_set_robust_list
-531    x32     get_robust_list         __x32_compat_sys_get_robust_list
-532    x32     vmsplice                __x32_compat_sys_vmsplice
-533    x32     move_pages              __x32_compat_sys_move_pages
-534    x32     preadv                  __x32_compat_sys_preadv64
-535    x32     pwritev                 __x32_compat_sys_pwritev64
-536    x32     rt_tgsigqueueinfo       __x32_compat_sys_rt_tgsigqueueinfo
-537    x32     recvmmsg                __x32_compat_sys_recvmmsg_time64
-538    x32     sendmmsg                __x32_compat_sys_sendmmsg
-539    x32     process_vm_readv        __x32_compat_sys_process_vm_readv
-540    x32     process_vm_writev       __x32_compat_sys_process_vm_writev
-541    x32     setsockopt              __x32_compat_sys_setsockopt
-542    x32     getsockopt              __x32_compat_sys_getsockopt
-543    x32     io_setup                __x32_compat_sys_io_setup
-544    x32     io_submit               __x32_compat_sys_io_submit
-545    x32     execveat                __x32_compat_sys_execveat/ptregs
-546    x32     preadv2                 __x32_compat_sys_preadv64v2
-547    x32     pwritev2                __x32_compat_sys_pwritev64v2
+512    x32     rt_sigaction            compat_sys_rt_sigaction
+513    x32     rt_sigreturn            compat_sys_x32_rt_sigreturn
+514    x32     ioctl                   compat_sys_ioctl
+515    x32     readv                   compat_sys_readv
+516    x32     writev                  compat_sys_writev
+517    x32     recvfrom                compat_sys_recvfrom
+518    x32     sendmsg                 compat_sys_sendmsg
+519    x32     recvmsg                 compat_sys_recvmsg
+520    x32     execve                  compat_sys_execve
+521    x32     ptrace                  compat_sys_ptrace
+522    x32     rt_sigpending           compat_sys_rt_sigpending
+523    x32     rt_sigtimedwait         compat_sys_rt_sigtimedwait_time64
+524    x32     rt_sigqueueinfo         compat_sys_rt_sigqueueinfo
+525    x32     sigaltstack             compat_sys_sigaltstack
+526    x32     timer_create            compat_sys_timer_create
+527    x32     mq_notify               compat_sys_mq_notify
+528    x32     kexec_load              compat_sys_kexec_load
+529    x32     waitid                  compat_sys_waitid
+530    x32     set_robust_list         compat_sys_set_robust_list
+531    x32     get_robust_list         compat_sys_get_robust_list
+532    x32     vmsplice                compat_sys_vmsplice
+533    x32     move_pages              compat_sys_move_pages
+534    x32     preadv                  compat_sys_preadv64
+535    x32     pwritev                 compat_sys_pwritev64
+536    x32     rt_tgsigqueueinfo       compat_sys_rt_tgsigqueueinfo
+537    x32     recvmmsg                compat_sys_recvmmsg_time64
+538    x32     sendmmsg                compat_sys_sendmmsg
+539    x32     process_vm_readv        compat_sys_process_vm_readv
+540    x32     process_vm_writev       compat_sys_process_vm_writev
+541    x32     setsockopt              compat_sys_setsockopt
+542    x32     getsockopt              compat_sys_getsockopt
+543    x32     io_setup                compat_sys_io_setup
+544    x32     io_submit               compat_sys_io_submit
+545    x32     execveat                compat_sys_execveat
+546    x32     preadv2                 compat_sys_preadv64v2
+547    x32     pwritev2                compat_sys_pwritev64v2
index bfb21d049e6ce1569c301ac8813c7b3488cd2244..cf147db4e5ca0ea232ce4b9bf0f42e55580f97c0 100755 (executable)
@@ -22,7 +22,9 @@ include/uapi/linux/usbdevice_fs.h
 include/uapi/linux/vhost.h
 include/uapi/sound/asound.h
 include/linux/bits.h
+include/vdso/bits.h
 include/linux/const.h
+include/vdso/const.h
 include/linux/hash.h
 include/uapi/linux/hw_breakpoint.h
 arch/x86/include/asm/disabled-features.h
@@ -115,6 +117,7 @@ check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/ex
 check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
 check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
+check include/linux/build_bug.h       '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
 check include/linux/ctype.h          '-I "isdigit("'
 check lib/ctype.c                    '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
 check arch/x86/include/asm/inat.h     '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
index 062ca849c8fda9c0d032c154c380ddf067a34a26..f4db894e0af6d14bb0ead601ae7424d097be019a 100644 (file)
@@ -46,6 +46,7 @@ static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size,
        P_FLAG(NEWNET);
        P_FLAG(IO);
        P_FLAG(CLEAR_SIGHAND);
+       P_FLAG(INTO_CGROUP);
 #undef P_FLAG
 
        if (flags)
index 9fa771a90d79c2aad3c49338a2d7dbca922630ca..862c8331dded677cd9e1fb0563fa272472b4e42b 100644 (file)
@@ -69,6 +69,7 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
 
        P_MREMAP_FLAG(MAYMOVE);
        P_MREMAP_FLAG(FIXED);
+       P_MREMAP_FLAG(DONTUNMAP);
 #undef P_MREMAP_FLAG
 
        if (flags)
index 347b2c0789e4c05c5b13c3197f7df5521f8257b8..c5e3e9a68162d784287f8837c2c802808df517a0 100644 (file)
@@ -21,6 +21,8 @@ if cc_is_clang:
             vars[var] = sub("-fstack-clash-protection", "", vars[var])
         if not clang_has_option("-fstack-protector-strong"):
             vars[var] = sub("-fstack-protector-strong", "", vars[var])
+        if not clang_has_option("-fno-semantic-interposition"):
+            vars[var] = sub("-fno-semantic-interposition", "", vars[var])
 
 from distutils.core import setup, Extension
 
index 0fd713d3674f611813b4af461474916b991b6e53..03ecb8cd0eeca97c3769d3480575d6d4af13f349 100644 (file)
@@ -803,8 +803,11 @@ static void generic_metric(struct perf_stat_config *config,
                                     out->force_header ?
                                     (metric_name ? metric_name : name) : "", 0);
                }
-       } else
-               print_metric(config, ctxp, NULL, NULL, "", 0);
+       } else {
+               print_metric(config, ctxp, NULL, NULL,
+                            out->force_header ?
+                            (metric_name ? metric_name : name) : "", 0);
+       }
 
        for (i = 1; i < pctx.num_ids; i++)
                zfree(&pctx.ids[i].name);
index 16a814eb4d6459de76f6cccfcb14d02a2cf44da3..56d80adcf4bdec8d074d1036a87333901e2bfa59 100644 (file)
@@ -19,15 +19,16 @@ void test_mmap(void)
        const size_t map_sz = roundup_page(sizeof(struct map_data));
        const int zero = 0, one = 1, two = 2, far = 1500;
        const long page_size = sysconf(_SC_PAGE_SIZE);
-       int err, duration = 0, i, data_map_fd;
+       int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
        struct bpf_map *data_map, *bss_map;
        void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
        struct test_mmap__bss *bss_data;
+       struct bpf_map_info map_info;
+       __u32 map_info_sz = sizeof(map_info);
        struct map_data *map_data;
        struct test_mmap *skel;
        __u64 val = 0;
 
-
        skel = test_mmap__open_and_load();
        if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
                return;
@@ -36,6 +37,14 @@ void test_mmap(void)
        data_map = skel->maps.data_map;
        data_map_fd = bpf_map__fd(data_map);
 
+       /* get map's ID */
+       memset(&map_info, 0, map_info_sz);
+       err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
+       if (CHECK(err, "map_get_info", "failed %d\n", errno))
+               goto cleanup;
+       data_map_id = map_info.id;
+
+       /* mmap BSS map */
        bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
                          bpf_map__fd(bss_map), 0);
        if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
@@ -98,6 +107,10 @@ void test_mmap(void)
                  "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
                goto cleanup;
 
+       err = mprotect(map_mmaped, map_sz, PROT_READ);
+       if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
+               goto cleanup;
+
        /* unmap R/W mapping */
        err = munmap(map_mmaped, map_sz);
        map_mmaped = NULL;
@@ -111,6 +124,12 @@ void test_mmap(void)
                map_mmaped = NULL;
                goto cleanup;
        }
+       err = mprotect(map_mmaped, map_sz, PROT_WRITE);
+       if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
+               goto cleanup;
+       err = mprotect(map_mmaped, map_sz, PROT_EXEC);
+       if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
+               goto cleanup;
        map_data = map_mmaped;
 
        /* map/unmap in a loop to test ref counting */
@@ -197,6 +216,45 @@ void test_mmap(void)
        CHECK_FAIL(map_data->val[far] != 3 * 321);
 
        munmap(tmp2, 4 * page_size);
+
+       tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
+       if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
+               goto cleanup;
+
+       test_mmap__destroy(skel);
+       skel = NULL;
+       CHECK_FAIL(munmap(bss_mmaped, bss_sz));
+       bss_mmaped = NULL;
+       CHECK_FAIL(munmap(map_mmaped, map_sz));
+       map_mmaped = NULL;
+
+       /* map should be still held by active mmap */
+       tmp_fd = bpf_map_get_fd_by_id(data_map_id);
+       if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
+               munmap(tmp1, map_sz);
+               goto cleanup;
+       }
+       close(tmp_fd);
+
+       /* this should release data map finally */
+       munmap(tmp1, map_sz);
+
+       /* we need to wait for RCU grace period */
+       for (i = 0; i < 10000; i++) {
+               __u32 id = data_map_id - 1;
+               if (bpf_map_get_next_id(id, &id) || id > data_map_id)
+                       break;
+               usleep(1);
+       }
+
+       /* should fail to get map FD by non-existing ID */
+       tmp_fd = bpf_map_get_fd_by_id(data_map_id);
+       if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
+                 "unexpectedly succeeded %d\n", tmp_fd)) {
+               close(tmp_fd);
+               goto cleanup;
+       }
+
 cleanup:
        if (bss_mmaped)
                CHECK_FAIL(munmap(bss_mmaped, bss_sz));
index 9d9351dc2ded422d92a0b050ae386e192839e1c0..713167449c9898a58264c5a88d913413b03e6f89 100644 (file)
@@ -43,18 +43,18 @@ static struct sec_name_test tests[] = {
        {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
        {
                "cgroup_skb/ingress",
-               {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+               {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS},
                {0, BPF_CGROUP_INET_INGRESS},
        },
        {
                "cgroup_skb/egress",
-               {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+               {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS},
                {0, BPF_CGROUP_INET_EGRESS},
        },
        {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
        {
                "cgroup/sock",
-               {0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
+               {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE},
                {0, BPF_CGROUP_INET_SOCK_CREATE},
        },
        {
@@ -69,26 +69,38 @@ static struct sec_name_test tests[] = {
        },
        {
                "cgroup/dev",
-               {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
+               {0, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE},
                {0, BPF_CGROUP_DEVICE},
        },
-       {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
+       {
+               "sockops",
+               {0, BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS},
+               {0, BPF_CGROUP_SOCK_OPS},
+       },
        {
                "sk_skb/stream_parser",
-               {0, BPF_PROG_TYPE_SK_SKB, 0},
+               {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_PARSER},
                {0, BPF_SK_SKB_STREAM_PARSER},
        },
        {
                "sk_skb/stream_verdict",
-               {0, BPF_PROG_TYPE_SK_SKB, 0},
+               {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_VERDICT},
                {0, BPF_SK_SKB_STREAM_VERDICT},
        },
        {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
-       {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
-       {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
+       {
+               "sk_msg",
+               {0, BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT},
+               {0, BPF_SK_MSG_VERDICT},
+       },
+       {
+               "lirc_mode2",
+               {0, BPF_PROG_TYPE_LIRC_MODE2, BPF_LIRC_MODE2},
+               {0, BPF_LIRC_MODE2},
+       },
        {
                "flow_dissector",
-               {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
+               {0, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_FLOW_DISSECTOR},
                {0, BPF_FLOW_DISSECTOR},
        },
        {
@@ -158,17 +170,17 @@ static void test_prog_type_by_name(const struct sec_name_test *test)
                                      &expected_attach_type);
 
        CHECK(rc != test->expected_load.rc, "check_code",
-             "prog: unexpected rc=%d for %s", rc, test->sec_name);
+             "prog: unexpected rc=%d for %s\n", rc, test->sec_name);
 
        if (rc)
                return;
 
        CHECK(prog_type != test->expected_load.prog_type, "check_prog_type",
-             "prog: unexpected prog_type=%d for %s",
+             "prog: unexpected prog_type=%d for %s\n",
              prog_type, test->sec_name);
 
        CHECK(expected_attach_type != test->expected_load.expected_attach_type,
-             "check_attach_type", "prog: unexpected expected_attach_type=%d for %s",
+             "check_attach_type", "prog: unexpected expected_attach_type=%d for %s\n",
              expected_attach_type, test->sec_name);
 }
 
@@ -180,13 +192,13 @@ static void test_attach_type_by_name(const struct sec_name_test *test)
        rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
 
        CHECK(rc != test->expected_attach.rc, "check_ret",
-             "attach: unexpected rc=%d for %s", rc, test->sec_name);
+             "attach: unexpected rc=%d for %s\n", rc, test->sec_name);
 
        if (rc)
                return;
 
        CHECK(attach_type != test->expected_attach.attach_type,
-             "check_attach_type", "attach: unexpected attach_type=%d for %s",
+             "check_attach_type", "attach: unexpected attach_type=%d for %s\n",
              attach_type, test->sec_name);
 }
 
index 1e4c258de09d976a1ace45b71d8fc555c9414294..b17eb2045c1d1f601d92802318a9c7b73e0bd546 100644 (file)
 
 char *CMD_ARGS[] = {"true", NULL};
 
-int heap_mprotect(void)
+#define GET_PAGE_ADDR(ADDR, PAGE_SIZE)                                 \
+       (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1))
+
+int stack_mprotect(void)
 {
        void *buf;
        long sz;
@@ -25,12 +28,9 @@ int heap_mprotect(void)
        if (sz < 0)
                return sz;
 
-       buf = memalign(sz, 2 * sz);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       ret = mprotect(buf, sz, PROT_READ | PROT_WRITE | PROT_EXEC);
-       free(buf);
+       buf = alloca(sz * 3);
+       ret = mprotect(GET_PAGE_ADDR(buf, sz), sz,
+                      PROT_READ | PROT_WRITE | PROT_EXEC);
        return ret;
 }
 
@@ -73,8 +73,8 @@ void test_test_lsm(void)
 
        skel->bss->monitored_pid = getpid();
 
-       err = heap_mprotect();
-       if (CHECK(errno != EPERM, "heap_mprotect", "want errno=EPERM, got %d\n",
+       err = stack_mprotect();
+       if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n",
                  errno))
                goto close_prog;
 
index 05b294d6b9239df13819543a2712f2aeb5e3a6af..15ef3531483ef3d5bfb91cc7cf7d044b71f941f8 100644 (file)
@@ -6,19 +6,34 @@
 
 void test_xdp_attach(void)
 {
+       __u32 duration = 0, id1, id2, id0 = 0, len;
        struct bpf_object *obj1, *obj2, *obj3;
        const char *file = "./test_xdp.o";
+       struct bpf_prog_info info = {};
        int err, fd1, fd2, fd3;
-       __u32 duration = 0;
        DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
                            .old_fd = -1);
 
+       len = sizeof(info);
+
        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
        if (CHECK_FAIL(err))
                return;
+       err = bpf_obj_get_info_by_fd(fd1, &info, &len);
+       if (CHECK_FAIL(err))
+               goto out_1;
+       id1 = info.id;
+
        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
        if (CHECK_FAIL(err))
                goto out_1;
+
+       memset(&info, 0, sizeof(info));
+       err = bpf_obj_get_info_by_fd(fd2, &info, &len);
+       if (CHECK_FAIL(err))
+               goto out_2;
+       id2 = info.id;
+
        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
        if (CHECK_FAIL(err))
                goto out_2;
@@ -28,6 +43,11 @@ void test_xdp_attach(void)
        if (CHECK(err, "load_ok", "initial load failed"))
                goto out_close;
 
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+       if (CHECK(err || id0 != id1, "id1_check",
+                 "loaded prog id %u != id1 %u, err %d", id0, id1, err))
+               goto out_close;
+
        err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE,
                                       &opts);
        if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
@@ -37,6 +57,10 @@ void test_xdp_attach(void)
        err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts);
        if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
                goto out;
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+       if (CHECK(err || id0 != id2, "id2_check",
+                 "loaded prog id %u != id2 %u, err %d", id0, id2, err))
+               goto out_close;
 
        err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts);
        if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
@@ -51,6 +75,10 @@ void test_xdp_attach(void)
        if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
                goto out;
 
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+       if (CHECK(err || id0 != 0, "unload_check",
+                 "loaded prog id %u != 0, err %d", id0, err))
+               goto out_close;
 out:
        bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
 out_close:
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
new file mode 100644 (file)
index 0000000..d2d7a28
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/if_link.h>
+#include <test_progs.h>
+
+#define IFINDEX_LO 1
+
+void test_xdp_info(void)
+{
+       __u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
+       const char *file = "./xdp_dummy.o";
+       struct bpf_prog_info info = {};
+       struct bpf_object *obj;
+       int err, prog_fd;
+
+       /* Get prog_id for XDP_ATTACHED_NONE mode */
+
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+       if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
+               return;
+       if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
+               return;
+
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+       if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
+               return;
+       if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
+                 prog_id))
+               return;
+
+       /* Setup prog */
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+       if (CHECK_FAIL(err))
+               return;
+
+       err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
+       if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
+               goto out_close;
+
+       err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+       if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
+               goto out_close;
+
+       /* Get prog_id for single prog mode */
+
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+       if (CHECK(err, "get_xdp", "errno=%d\n", errno))
+               goto out;
+       if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
+               goto out;
+
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+       if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
+               goto out;
+       if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
+               goto out;
+
+       err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE);
+       if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
+               goto out;
+       if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
+               goto out;
+
+out:
+       bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+out_close:
+       bpf_object__close(obj);
+}
index a4e3c223028d750613ccac75dce4e00169978fe5..b4598d4bc4f7307bf013bbb75ebf769c1cffa57d 100644 (file)
@@ -23,12 +23,12 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
                return ret;
 
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
-       int is_heap = 0;
+       int is_stack = 0;
 
-       is_heap = (vma->vm_start >= vma->vm_mm->start_brk &&
-                  vma->vm_end <= vma->vm_mm->brk);
+       is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
+                   vma->vm_end >= vma->vm_mm->start_stack);
 
-       if (is_heap && monitored_pid == pid) {
+       if (is_stack && monitored_pid == pid) {
                mprotect_count++;
                ret = -EPERM;
        }
index 4d0d09574bf4bb4a1ed42267821c0040771d1f5c..a253a064e6e059056d6389b87c5540e5cc37ebfe 100644 (file)
        .result = REJECT
 },
 {
-       "bounds check mixed 32bit and 64bit arithmatic. test1",
+       "bounds check mixed 32bit and 64bit arithmetic. test1",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_1, -1),
        .result = ACCEPT
 },
 {
-       "bounds check mixed 32bit and 64bit arithmatic. test2",
+       "bounds check mixed 32bit and 64bit arithmetic. test2",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_1, -1),
index e566c70e64a1971e653a2922012e5e420a85b193..a3e43189d94003db8170a91f7d9b71ab4f1a407d 100755 (executable)
@@ -713,9 +713,8 @@ def set_operation_mode(pm, parser, args, remaining):
         exit(0)
 
     if args.list:
-        if args.list:
-            list_test_cases(alltests)
-            exit(0)
+        list_test_cases(alltests)
+        exit(0)
 
     if len(alltests):
         req_plugins = pm.get_required_plugins(alltests)