drm/radeon: Disable HDP flush before every CS again for < r600
author Michel Dänzer <michel.daenzer@amd.com>
Wed, 17 Sep 2014 07:25:55 +0000 (16:25 +0900)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 18 Sep 2014 22:57:07 +0000 (18:57 -0400)
It was causing display corruption, at least with R300 generation GPUs.

Reported-and-Tested-by: Mikael Pettersson <mikpelinux@gmail.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
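
For background, the flush being disabled here is dispatched through an optional per-ring callback: the ring-commit path only emits it when the ASIC's ring descriptor provides an hdp_flush hook. The sketch below is a paraphrase of the 3.17-era radeon_ring_commit() guard, not the verbatim upstream code:

	void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
				bool hdp_flush)
	{
		/* If the HDP flush is emitted via the ring buffer, it must
		 * happen before the ring is padded. */
		if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
			rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);

		/* ... pad the ring, barrier, then bump the write pointer ... */
	}

Because the guard checks the callback pointer, clearing .hdp_flush in the r100/r300 ring descriptors (below) is enough to stop the flush from being emitted before every command stream on those chips.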
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_drv.c

diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4c5ec44ff328b3c63203e4c7c92f5283126e65e9..b0098e792e62337a6b4d511ca62baa7f37bc4eba 100644
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+                               RADEON_HDP_READ_BUFFER_INVALIDATE);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Who ever call radeon_fence_emit should call ring_lock and ask
  * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
        (void)RREG32(RADEON_CP_RB_WPTR);
 }
 
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-                               RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
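
Note that the helper is not deleted: it is demoted to static and moved above its one remaining caller, r100_fence_ring_emit(), so the HDP read buffers are still invalidated once per fence. The flush itself is two writes to RADEON_HOST_PATH_CNTL: one with RADEON_HDP_READ_BUFFER_INVALIDATE set, one restoring the saved hdp_cntl value. Roughly, in the fence path (a paraphrase, assuming the surrounding function is otherwise unchanged):

	void r100_fence_ring_emit(struct radeon_device *rdev,
				  struct radeon_fence *fence)
	{
		struct radeon_ring *ring = &rdev->ring[fence->ring];

		/* Wait for the engines to go idle, then invalidate the HDP
		 * read buffers so the CPU sees up-to-date VRAM contents. */
		radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
		radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN |
					RADEON_WAIT_3D_IDLECLEAN);
		r100_ring_hdp_flush(rdev, ring);

		/* ... emit the fence sequence number and fire the IRQ ... */
	}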
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eeeeabe09758ddccf2175ad48d9b9bd62e896e83..2dd5847f9b98e8a428e63b9ff9563f52b94d2709 100644
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
        .get_rptr = &r100_gfx_get_rptr,
        .get_wptr = &r100_gfx_get_wptr,
        .set_wptr = &r100_gfx_set_wptr,
-       .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r300_asic = {
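
Removing the two initializers is what actually disables the flush: members omitted from a C designated initializer are zero-initialized, so .hdp_flush is NULL and the guard in the commit path never fires. Illustrated on the r300 descriptor (other members abbreviated):

	static struct radeon_asic_ring r300_gfx_ring = {
		/* ... other callbacks as above ... */
		.get_rptr = &r100_gfx_get_rptr,
		.get_wptr = &r100_gfx_get_wptr,
		.set_wptr = &r100_gfx_set_wptr,
		/* .hdp_flush omitted: it defaults to NULL, so no flush is
		 * emitted before each CS on r100/r300 class hardware. */
	};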
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 275a5dc01780f7bedbed2cb3486c4c3d383710eb..7756bc1e1cd3ef088ac4bb96fa04a9ab632ad088 100644
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-                        struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8df888908833f5d1a3e8d9f745edb5fe9b6143ea..e8545be7d5845dfad813d7b21474fa48c37d5699 100644
@@ -83,7 +83,7 @@
  *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  *   2.39.0 - Add INFO query for number of active CUs
  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- *            CS to GPU
+ *            CS to GPU on >= r600
  */
 #define KMS_DRIVER_MAJOR       2
 #define KMS_DRIVER_MINOR       40
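
The KMS driver version stays at 2.40.0; only the comment is corrected to say that the automatic HDP cache flush before CS submission applies on >= r600 only. Userspace that cares can read the version with the standard libdrm call; a minimal example (the device path is illustrative, error handling trimmed):

	#include <stdio.h>
	#include <fcntl.h>
	#include <xf86drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);  /* may be another cardN */
		drmVersionPtr v = drmGetVersion(fd);

		if (v) {
			/* radeon reports 2.40+ when the kernel flushes the HDP
			 * cache before CS submission (on >= r600 after this fix). */
			printf("%s KMS driver %d.%d.%d\n", v->name,
			       v->version_major, v->version_minor,
			       v->version_patchlevel);
			drmFreeVersion(v);
		}
		return 0;
	}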