drm/amdgpu: make pipeline sync be in same place v2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 833c3c16501a0221da5161eaec189a5576292abc..e2cafbd690c0cc8a830845688b16f89cdf500b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -554,7 +554,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages);
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
@@ -907,6 +907,7 @@ struct amdgpu_mec {
 struct amdgpu_kiq {
        u64                     eop_gpu_addr;
        struct amdgpu_bo        *eop_obj;
+       struct mutex            ring_mutex;
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
 };
@@ -1061,6 +1062,8 @@ struct amdgpu_gfx {
        uint32_t                        grbm_soft_reset;
        uint32_t                        srbm_soft_reset;
        bool                            in_reset;
+       /* s3/s4 mask */
+       bool                            in_suspend;
        /* NGG */
        struct amdgpu_ngg               ngg;
 };
@@ -1114,7 +1117,6 @@ struct amdgpu_cs_parser {
 #define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
 #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
 #define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-#define AMDGPU_VM_DOMAIN                    (1 << 3) /* bit set means in virtual memory context */
 
 struct amdgpu_job {
        struct amd_sched_job    base;
@@ -1122,6 +1124,7 @@ struct amdgpu_job {
        struct amdgpu_vm        *vm;
        struct amdgpu_ring      *ring;
        struct amdgpu_sync      sync;
+       struct amdgpu_sync      sched_sync;
        struct amdgpu_ib        *ibs;
        struct dma_fence        *fence; /* the hw fence */
        uint32_t                preamble_status;
@@ -1129,7 +1132,6 @@ struct amdgpu_job {
        void                    *owner;
        uint64_t                fence_ctx; /* the fence_context this job uses */
        bool                    vm_needs_flush;
-       bool                    need_pipeline_sync;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
        uint32_t                gds_base, gds_size;
@@ -1296,7 +1298,6 @@ struct amdgpu_smumgr {
  */
 struct amdgpu_allowed_register_entry {
        uint32_t reg_offset;
-       bool untouched;
        bool grbm_indexed;
 };
 
@@ -1733,30 +1734,31 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *sr
        unsigned occupied, chunk1, chunk2;
        void *dst;
 
-       if (ring->count_dw < count_dw) {
+       if (unlikely(ring->count_dw < count_dw)) {
                DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-       } else {
-               occupied = ring->wptr & ring->buf_mask;
-               dst = (void *)&ring->ring[occupied];
-               chunk1 = ring->buf_mask + 1 - occupied;
-               chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
-               chunk2 = count_dw - chunk1;
-               chunk1 <<= 2;
-               chunk2 <<= 2;
-
-               if (chunk1)
-                       memcpy(dst, src, chunk1);
-
-               if (chunk2) {
-                       src += chunk1;
-                       dst = (void *)ring->ring;
-                       memcpy(dst, src, chunk2);
-               }
-
-               ring->wptr += count_dw;
-               ring->wptr &= ring->ptr_mask;
-               ring->count_dw -= count_dw;
+               return;
        }
+
+       occupied = ring->wptr & ring->buf_mask;
+       dst = (void *)&ring->ring[occupied];
+       chunk1 = ring->buf_mask + 1 - occupied;
+       chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
+       chunk2 = count_dw - chunk1;
+       chunk1 <<= 2;
+       chunk2 <<= 2;
+
+       if (chunk1)
+               memcpy(dst, src, chunk1);
+
+       if (chunk2) {
+               src += chunk1;
+               dst = (void *)ring->ring;
+               memcpy(dst, src, chunk2);
+       }
+
+       ring->wptr += count_dw;
+       ring->wptr &= ring->ptr_mask;
+       ring->count_dw -= count_dw;
 }
 
 static inline struct amdgpu_sdma_instance *
@@ -1813,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -1912,10 +1915,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
-                                   int *max_error,
-                                   struct timeval *vblank_time,
-                                   unsigned flags);
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg);