extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
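/*
 * Illustrative only: one way the two new externs above could be backed in
 * amdgpu_drv.c as read-only module parameters.  The default values and the
 * parameter descriptions here are assumptions for the sketch, not taken
 * from the patch itself.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

int amdgpu_sched_jobs = 16;		/* assumed default software queue depth */
int amdgpu_sched_hw_submission = 2;	/* assumed default concurrent HW submissions */

MODULE_PARM_DESC(sched_jobs, "max number of jobs in the software queue (assumed default 16)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);

MODULE_PARM_DESC(sched_hw_submission, "max number of concurrent hardware submissions (assumed default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);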
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
struct amdgpu_bo *bo;
/* write-back address offset to bo start */
uint32_t offset;
- uint64_t sequence;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **fences,
- bool intr);
+ bool intr, long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
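/*
 * Illustrative only: with the signed long return type and the explicit
 * timeout argument, amdgpu_fence_wait_any() can follow the usual
 * fence_wait_timeout() convention (remaining jiffies on success, 0 on
 * timeout, negative errno otherwise).  The caller sketch below assumes
 * exactly that convention and is not part of the patch; it also assumes
 * <linux/jiffies.h> and <linux/errno.h> are available.
 */
static signed long example_wait_any_with_timeout(struct amdgpu_device *adev,
						 struct amdgpu_fence **fences,
						 bool intr)
{
	/* wait at most AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS, interruptible if asked */
	signed long r = amdgpu_fence_wait_any(adev, fences, intr,
			msecs_to_jiffies(AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS));

	if (r == 0)
		return -ETIMEDOUT;	/* no fence signaled before the timeout */
	return r;			/* negative errno, or remaining jiffies */
}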
return a->seq < b->seq;
}
int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
void *owner, struct amdgpu_fence **fence);
/*
extern struct amd_sched_backend_ops amdgpu_sched_ops;
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_ib *ibs,
+ unsigned num_ibs,
+ int (*free_job)(struct amdgpu_cs_parser *),
+ void *owner);
+
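/*
 * Illustrative only: a minimal sketch of pushing a kernel-internal IB through
 * the scheduler with the helper declared above.  The free_job callback body
 * and the NULL owner are assumptions made for the example.
 */
static int example_free_job(struct amdgpu_cs_parser *parser)
{
	/* assumed cleanup hook: a real callback would release the parser's IBs */
	return 0;
}

static int example_submit_one_ib(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring,
				 struct amdgpu_ib *ib)
{
	/* submit a single IB owned by the kernel (NULL owner is an assumption) */
	return amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						    example_free_job, NULL);
}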
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
+ struct fence *fence, uint64_t queued_seq);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
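/*
 * Illustrative only: the new queued_seq argument lets the caller hand over the
 * sequence number assigned when the job was queued, and the returned value is
 * the sequence under which the fence can later be looked up.  The pairing
 * below is a sketch of that assumption, not code from the patch.
 */
static struct fence *example_track_and_lookup(struct amdgpu_ctx *ctx,
					      struct amdgpu_ring *ring,
					      struct fence *fence,
					      uint64_t queued_seq)
{
	/* remember the fence under the scheduler-assigned sequence number */
	uint64_t seq = amdgpu_ctx_add_fence(ctx, ring, fence, queued_seq);

	/* later: fetch it back by the same sequence number */
	return amdgpu_ctx_get_fence(ctx, ring, seq);
}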