drm/amdgpu: new implementation for fence_wait_any (v2)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index baefa635169a953f4cb41840b24de11b632e7345..5f32f859230b3ae77d2b89c59383507ab9c34b4a 100644
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
  */
@@ -77,7 +79,11 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT           (HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -178,6 +184,7 @@ struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
 struct amdgpu_irq_src;
+struct amdgpu_fpriv;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -381,7 +388,6 @@ struct amdgpu_fence_driver {
        uint64_t                        sync_seq[AMDGPU_MAX_RINGS];
        atomic64_t                      last_seq;
        bool                            initialized;
-       bool                            delayed_irq;
        struct amdgpu_irq_src           *irq_src;
        unsigned                        irq_type;
        struct delayed_work             lockup_work;
@@ -423,10 +429,10 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-                         uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -434,9 +440,9 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
 int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
                          struct amdgpu_fence **fences,
-                         bool intr);
+                         bool intr, long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
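A minimal sketch of how a caller might use the reworked wait, assuming it follows the usual fence_wait_timeout() convention (remaining jiffies on success, 0 on timeout, negative error code otherwise); the surrounding caller and the fence being filled in are hypothetical:

	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS] = {};
	signed long r;

	/* fill in only the slots of interest, e.g. one fence per busy ring */
	fences[0] = fence;

	r = amdgpu_fence_wait_any(adev, fences, false,
				  msecs_to_jiffies(AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS));
	if (r == 0)		/* assumed: 0 means the timeout expired */
		r = -ETIMEDOUT;
	if (r < 0)
		return r;
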
@@ -481,7 +487,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
        return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
                           void *owner, struct amdgpu_fence **fence);
 
 /*
@@ -697,8 +703,8 @@ struct amdgpu_sync {
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-                      struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                     struct fence *f);
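A minimal sketch of the reworked sync call, which now takes the device and a plain struct fence and can fail; the fence 'f' and the surrounding caller are hypothetical:

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* make the next submission on this sync object wait for 'f' */
	r = amdgpu_sync_fence(adev, &sync, f);
	if (r)
		return r;
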
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
@@ -844,6 +850,8 @@ struct amdgpu_ib {
        uint32_t                        gws_base, gws_size;
        uint32_t                        oa_base, oa_size;
        uint32_t                        flags;
+       /* resulting sequence number */
+       uint64_t                        sequence;
 };
 
 enum amdgpu_ring_type {
@@ -854,11 +862,22 @@ enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+                                        struct amdgpu_ring *ring,
+                                        struct amdgpu_ib *ibs,
+                                        unsigned num_ibs,
+                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        void *owner);
+
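A rough sketch of a kernel-internal submission through the helper above, assuming the IB has already been allocated and filled in; the free_job callback, the NULL owner cookie and the error handling are placeholders:

	/* called once the scheduler is done with the job */
	static int my_free_job(struct amdgpu_cs_parser *sched_job)
	{
		/* release the IBs / parser state allocated for this job */
		return 0;
	}

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 my_free_job, NULL);
	if (r)
		return r;
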
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
+       struct amd_gpu_scheduler        *scheduler;
 
+       spinlock_t              fence_lock;
        struct mutex            *ring_lock;
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
@@ -892,6 +911,7 @@ struct amdgpu_ring {
        struct amdgpu_ctx       *current_ctx;
        enum amdgpu_ring_type   type;
        char                    name[16];
+       bool                    is_pte_ring;
 };
 
 /*
@@ -983,27 +1003,48 @@ struct amdgpu_vm_manager {
  * context related structures
  */
 
-struct amdgpu_ctx_state {
-       uint64_t flags;
-       uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING      16
+
+struct amdgpu_ctx_ring {
+       uint64_t        sequence;
+       struct fence    *fences[AMDGPU_CTX_MAX_CS_PENDING];
+       struct amd_context_entity c_entity;
 };
 
 struct amdgpu_ctx {
-       /* call kref_get()before CS start and kref_put() after CS fence signaled */
-       struct kref refcount;
-       struct amdgpu_fpriv *fpriv;
-       struct amdgpu_ctx_state state;
-       uint32_t id;
-       unsigned reset_counter;
+       struct kref             refcount;
+       struct amdgpu_device    *adev;
+       unsigned                reset_counter;
+       spinlock_t              ring_lock;
+       struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
-       struct amdgpu_device *adev;
-       struct idr ctx_handles;
-       /* lock for IDR system */
-       struct mutex lock;
+       struct amdgpu_device    *adev;
+       struct mutex            lock;
+       /* protected by lock */
+       struct idr              ctx_handles;
 };
 
+int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
+                    uint32_t *id);
+int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
+                   uint32_t id);
+
+void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+                             struct fence *fence, uint64_t queued_seq);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+                                  struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *filp);
+
+
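A rough sketch of the per-context fence bookkeeping from the consumer side, looking up the fence behind a user-visible sequence number; 'fpriv', 'ctx_id', 'ring' and 'seq' are hypothetical, and the returned fence is assumed to carry a reference the caller must drop:

	struct amdgpu_ctx *ctx;
	struct fence *fence;

	ctx = amdgpu_ctx_get(fpriv, ctx_id);
	if (!ctx)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, seq);
	amdgpu_ctx_put(ctx);

	if (fence) {
		/* wait, test or chain on it, then drop the assumed reference */
		fence_put(fence);
	}
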
 /*
  * file private structure
  */
@@ -1012,7 +1053,7 @@ struct amdgpu_fpriv {
        struct amdgpu_vm        vm;
        struct mutex            bo_list_lock;
        struct idr              bo_list_handles;
-       struct amdgpu_ctx_mgr ctx_mgr;
+       struct amdgpu_ctx_mgr   ctx_mgr;
 };
 
 /*
@@ -1032,6 +1073,9 @@ struct amdgpu_bo_list {
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+void amdgpu_bo_list_copy(struct amdgpu_device *adev,
+                        struct amdgpu_bo_list *dst,
+                        struct amdgpu_bo_list *src);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
 
 /*
@@ -1186,6 +1230,19 @@ struct amdgpu_cs_chunk {
        void __user             *user_ptr;
 };
 
+union amdgpu_sched_job_param {
+       struct {
+               struct amdgpu_vm *vm;
+               uint64_t start;
+               uint64_t last;
+               struct amdgpu_fence **fence;
+
+       } vm_mapping;
+       struct {
+               struct amdgpu_bo *bo;
+       } vm;
+};
+
 struct amdgpu_cs_parser {
        struct amdgpu_device    *adev;
        struct drm_file         *filp;
@@ -1205,6 +1262,14 @@ struct amdgpu_cs_parser {
 
        /* user fence */
        struct amdgpu_user_fence uf;
+
+       struct amdgpu_ring *ring;
+       struct mutex job_lock;
+       struct work_struct job_work;
+       int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
+       union amdgpu_sched_job_param job_param;
+       int (*run_job)(struct amdgpu_cs_parser *sched_job);
+       int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
@@ -1849,17 +1914,12 @@ struct amdgpu_atcs {
        struct amdgpu_atcs_functions functions;
 };
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
-                                                       uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-                                                 uint32_t id);
-
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
 
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
-                                                struct drm_file *filp);
 
 /*
  * Core structure, functions and helpers.
@@ -1883,7 +1943,7 @@ struct amdgpu_device {
        struct rw_semaphore             exclusive_lock;
 
        /* ASIC */
-       enum amdgpu_asic_type           asic_type;
+       enum amd_asic_type              asic_type;
        uint32_t                        family;
        uint32_t                        rev_id;
        uint32_t                        external_rev_id;
@@ -2028,6 +2088,9 @@ struct amdgpu_device {
 
        /* amdkfd interface */
        struct kfd_dev          *kfd;
+
+       /* kernel context for IB submission */
+       struct amdgpu_ctx *kernel_ctx;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2215,6 +2278,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+                                                struct drm_file *filp,
+                                                struct amdgpu_ctx *ctx,
+                                                struct amdgpu_ib *ibs,
+                                                uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                       u32 ip_instance, u32 ring,
@@ -2278,8 +2347,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
                                          struct amdgpu_vm *vm,
                                          struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-                                      struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                     struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
                     struct amdgpu_fence *updates);