extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
struct amdgpu_bo *bo;
/* offset of the write-back address from the start of bo */
uint32_t offset;
- uint64_t sequence;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **fences,
- bool intr);
+ bool intr, long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
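
A usage sketch (not from the patch): assuming the reworked amdgpu_fence_wait_any() follows fence_wait_timeout() conventions (< 0 on error, 0 on timeout, remaining jiffies otherwise), a caller might look like:

	/* Hedged sketch: the return convention is an assumption. */
	static int example_wait_any(struct amdgpu_device *adev,
				    struct amdgpu_fence **fences)
	{
		signed long r;

		r = amdgpu_fence_wait_any(adev, fences, true,
					  msecs_to_jiffies(AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS));
		if (r < 0)
			return r;		/* interrupted or other error */
		if (r == 0)
			return -ETIMEDOUT;	/* no fence signaled in time */
		return 0;			/* at least one fence signaled */
	}
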
return a->seq < b->seq;
}
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
void *owner, struct amdgpu_fence **fence);
/*
extern struct amd_sched_backend_ops amdgpu_sched_ops;
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_ib *ibs,
+ unsigned num_ibs,
+ int (*free_job)(struct amdgpu_cs_parser *),
+ void *owner);
+
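A hedged sketch of driving a single kernel IB through the new helper; the my_free_job() callback and the NULL owner are illustrative assumptions, not taken from the patch:

	/* Illustrative free_job callback: release whatever the job owned. */
	static int my_free_job(struct amdgpu_cs_parser *sched_job)
	{
		/* nothing to clean up in this sketch */
		return 0;
	}

	static int example_submit(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
	{
		/* submit one IB; the helper wires it into the scheduler */
		return amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
							    my_free_job,
							    NULL /* owner: assumption */);
	}
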
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler *scheduler;
+ spinlock_t fence_lock;
struct mutex *ring_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
struct amdgpu_ctx *current_ctx;
enum amdgpu_ring_type type;
char name[16];
+ bool is_pte_ring;
};
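
Presumably the new fence_lock serializes the ring's fence bookkeeping; a minimal sketch under that assumption:

	/* Assumption: fence_lock guards fence_drv state (e.g. the last
	 * emitted sequence) against concurrent emit/process paths.
	 */
	static void example_touch_fence_state(struct amdgpu_ring *ring)
	{
		unsigned long flags;

		spin_lock_irqsave(&ring->fence_lock, flags);
		/* update ring->fence_drv bookkeeping here */
		spin_unlock_irqrestore(&ring->fence_lock, flags);
	}
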
/*
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
+ struct fence *fence, uint64_t queued_seq);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
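
A sketch of the reworked add/get flow, assuming queued_seq comes from the scheduler's submission counter and that amdgpu_ctx_get_fence() returns a referenced fence:

	static void example_ctx_fence(struct amdgpu_ctx *ctx,
				      struct amdgpu_ring *ring,
				      struct fence *fence, uint64_t queued_seq)
	{
		uint64_t seq;
		struct fence *f;

		/* record the fence under the scheduler-assigned sequence */
		seq = amdgpu_ctx_add_fence(ctx, ring, fence, queued_seq);

		/* later: anyone holding (ring, seq) can look it up */
		f = amdgpu_ctx_get_fence(ctx, ring, seq);
		if (f)
			fence_put(f);	/* assumption: a reference is returned */
	}
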
void __user *user_ptr;
};
+union amdgpu_sched_job_param {
+ struct {
+ struct amdgpu_vm *vm;
+ uint64_t start;
+ uint64_t last;
+ struct amdgpu_fence **fence;
+ } vm_mapping;
+ struct {
+ struct amdgpu_bo *bo;
+ } vm;
+};
+
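Inferred from the union layout above, a VM-mapping job would stash its parameters in job_param for the run_job() callback to consume; a sketch:

	static void example_setup_vm_mapping_job(struct amdgpu_cs_parser *p,
						 struct amdgpu_vm *vm,
						 uint64_t start, uint64_t last,
						 struct amdgpu_fence **fence)
	{
		/* field usage inferred from union amdgpu_sched_job_param */
		p->job_param.vm_mapping.vm = vm;
		p->job_param.vm_mapping.start = start;
		p->job_param.vm_mapping.last = last;
		p->job_param.vm_mapping.fence = fence;
	}
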
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
struct mutex job_lock;
struct work_struct job_work;
int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
+ union amdgpu_sched_job_param job_param;
int (*run_job)(struct amdgpu_cs_parser *sched_job);
int (*free_job)(struct amdgpu_cs_parser *sched_job);
};
/* amdkfd interface */
struct kfd_dev *kfd;
+
+ /* kernel context for IB submission */
+ struct amdgpu_ctx *kernel_ctx;
};
bool amdgpu_device_is_px(struct drm_device *dev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx,
+ struct amdgpu_ib *ibs,
+ uint32_t num_ibs);
+
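A sketch of a kernel-internal submission path using the new constructor together with adev->kernel_ctx; passing a NULL filp for kernel-owned jobs is an assumption:

	static struct amdgpu_cs_parser *
	example_kernel_parser(struct amdgpu_device *adev,
			      struct amdgpu_ib *ibs, uint32_t num_ibs)
	{
		/* kernel_ctx is the device's context for kernel IB submission */
		return amdgpu_cs_parser_create(adev, NULL /* filp: assumption */,
					       adev->kernel_ctx, ibs, num_ibs);
	}
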
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,