drm/amdgpu: move context switch handling into common code v2
author     Christian König <christian.koenig@amd.com>
           Fri, 6 May 2016 13:31:19 +0000 (15:31 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 11 May 2016 17:30:30 +0000 (13:30 -0400)
Repeating the context switch detection in each IP version was a source of bugs.

v2: rename parameter

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
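
For context, the hunks below all serve one pattern: amdgpu_ib_schedule() now decides once per
submission whether a context switch is needed and hands that decision to the ring backend through
the extended emit_ib callback, instead of each IP version re-deriving it from ring->current_ctx.
The following is only a minimal sketch of that common-code flow, assuming the amdgpu.h definitions
from this patch; it omits the cond_exec, VM flush, HDP invalidate and fence handling that the real
function also performs, and the helper name is hypothetical:

    /* Hypothetical, simplified sketch of the new flow in amdgpu_ib_schedule();
     * not the complete implementation. */
    static int schedule_ibs_sketch(struct amdgpu_ring *ring,
                                   unsigned num_ibs, struct amdgpu_ib *ibs)
    {
            /* Decided once in common code rather than in every IP's emit_ib. */
            bool skip_preamble   = (ring->current_ctx == ibs->ctx);
            bool need_ctx_switch = (ring->current_ctx != ibs->ctx);
            unsigned i;

            for (i = 0; i < num_ibs; ++i) {
                    struct amdgpu_ib *ib = &ibs[i];

                    /* Drop preamble IBs if we don't have a context switch. */
                    if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                            continue;

                    /* IP-specific callbacks just consume the flag now. */
                    amdgpu_ring_emit_ib(ring, ib, need_ctx_switch);
                    need_ctx_switch = false; /* only the first IB switches */
            }

            /* current_ctx is updated once, after the whole frame is emitted. */
            ring->current_ctx = ibs->ctx;
            return 0;
    }

On the IP side (see the gfx_v7_0 and gfx_v8_0 hunks), the callbacks use the passed-in ctx_switch
flag, e.g. to decide whether to emit the SWITCH_BUFFER packet, instead of comparing
ring->current_ctx != ib->ctx themselves.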
12 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

index 781f0471ef063a42e32b379b6c84cf3a72cb93df..db87edc7293684658c65daf12d9ac185c0c24b7e 100644 (file)
@@ -283,7 +283,7 @@ struct amdgpu_ring_funcs {
        int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
-                       struct amdgpu_ib *ib);
+                       struct amdgpu_ib *ib, bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -2221,7 +2221,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_ib(r, ib, c) (r)->funcs->emit_ib((r), (ib), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
index d6f85923edcd460838d56ab9602495ec7b4a848c..88b8fda7340f0c25b4ca217ec7dbe4143d03f048 100644 (file)
@@ -121,18 +121,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
-       uint64_t ctx, old_ctx;
        struct fence *hwf;
        struct amdgpu_vm *vm = NULL;
        unsigned i, patch_offset = ~0;
-       bool skip_preamble;
+       bool skip_preamble, need_ctx_switch;
 
        int r = 0;
 
        if (num_ibs == 0)
                return -EINVAL;
 
-       ctx = ibs->ctx;
        if (job) /* for domain0 job like ring test, ibs->job is not assigned */
                vm = job->vm;
 
@@ -156,7 +154,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                patch_offset = amdgpu_ring_init_cond_exec(ring);
 
        if (vm) {
-               /* do context switch */
                r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
                                    ib->gds_base, ib->gds_size,
                                    ib->gws_base, ib->gws_size,
@@ -173,16 +170,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
 
-       skip_preamble = ring->current_ctx == ctx;
-       old_ctx = ring->current_ctx;
+       skip_preamble = ring->current_ctx == ib->ctx;
+       need_ctx_switch = ring->current_ctx != ib->ctx;
        for (i = 0; i < num_ibs; ++i) {
+               ib = &ibs[i];
 
                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                        continue;
 
-               amdgpu_ring_emit_ib(ring, ib);
-               ring->current_ctx = ctx;
+               amdgpu_ring_emit_ib(ring, ib, need_ctx_switch);
+               need_ctx_switch = false;
        }
 
        if (ring->funcs->emit_hdp_invalidate)
@@ -191,7 +189,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        r = amdgpu_fence_emit(ring, &hwf);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-               ring->current_ctx = old_ctx;
                if (ib->vm_id)
                        amdgpu_vm_reset_id(adev, ib->vm_id);
                amdgpu_ring_undo(ring);
@@ -212,6 +209,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
+       ring->current_ctx = ibs->ctx;
        amdgpu_ring_commit(ring);
        return 0;
 }
index 7b7b0f64530aa1ce678fb76ed77196af1aaf1aaa..ad91664a764953726d8f19aa03bfab543ecdbabc 100644 (file)
@@ -762,7 +762,7 @@ out:
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index ef99d237018259bf95489977f3ff6c5fc4a7c767..40d0650e3a3711e2bce783dc3b2a83b5f6f6dc53 100644 (file)
@@ -34,7 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
index df824f80ac07d795bc92087e6c407e81aaa47803..6c2aa2b863b2c3ac101281501efd14825b56f700 100644 (file)
@@ -210,7 +210,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
-                          struct amdgpu_ib *ib)
+                          struct amdgpu_ib *ib, bool ctx_switch)
 {
        u32 extra_bits = ib->vm_id & 0xf;
        u32 next_rptr = ring->wptr + 5;
index d82fa9641f0e0c4f85042601afd4da4fab954077..189ef2b23668336689c8a408eeeffc10e9ecd0d8 100644 (file)
@@ -2030,13 +2030,12 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                     struct amdgpu_ib *ib, bool ctx_switch)
 {
-       bool need_ctx_switch = ring->current_ctx != ib->ctx;
        u32 header, control = 0;
        u32 next_rptr = ring->wptr + 5;
 
-       if (need_ctx_switch)
+       if (ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -2047,7 +2046,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch) {
+       if (ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -2070,7 +2069,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                         struct amdgpu_ib *ib, bool ctx_switch)
 {
        u32 header, control = 0;
        u32 next_rptr = ring->wptr + 5;
index 9a0b6df210c174eeed3fc64ab980ee25287eea24..0d556c907ab6f8a4a727462f6ee8d8558a196716 100644 (file)
@@ -5646,13 +5646,12 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                     struct amdgpu_ib *ib, bool ctx_switch)
 {
-       bool need_ctx_switch = ring->current_ctx != ib->ctx;
        u32 header, control = 0;
        u32 next_rptr = ring->wptr + 5;
 
-       if (need_ctx_switch)
+       if (ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -5663,7 +5662,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch) {
+       if (ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -5686,7 +5685,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                         struct amdgpu_ib *ib, bool ctx_switch)
 {
        u32 header, control = 0;
        u32 next_rptr = ring->wptr + 5;
index 6be2c0faa1bcb6c71b0ea90406079415dd696535..de94adb2b19efe2bf5f499bb0eb3e6a90258d25e 100644 (file)
@@ -242,7 +242,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
-                                  struct amdgpu_ib *ib)
+                                  struct amdgpu_ib *ib, bool ctx_switch)
 {
        u32 vmid = ib->vm_id & 0xf;
        u32 next_rptr = ring->wptr + 5;
index b3dab09205af5df0ff8b2c6caded816e469a570f..ca2aee3e88a35b1fbccd03d317b9cfdb75e35849 100644 (file)
@@ -400,7 +400,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-                                  struct amdgpu_ib *ib)
+                                  struct amdgpu_ib *ib, bool ctx_switch)
 {
        u32 vmid = ib->vm_id & 0xf;
        u32 next_rptr = ring->wptr + 5;
index 46a397654837c286cdad22afb95ea6aa361f7fee..a75ffb5b11b251f1f1ec2c1a7be1f30bde8d14c4 100644 (file)
@@ -489,7 +489,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                 struct amdgpu_ib *ib, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
index b96486c09250b244d8e049787e022b9536f628c8..ecb81014d8369cefc67fad2474a56ac6cacb64d1 100644 (file)
@@ -539,7 +539,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                 struct amdgpu_ib *ib, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 892bdac4bb213a401e6240d6420b8b0c255fe127..a43f1a7c58bc8c6be957fa7668bf382d61a586bf 100644 (file)
@@ -631,7 +631,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib)
+                                 struct amdgpu_ib *ib, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));