/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"

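/*
 * Back-end "prepare" hook called by the GPU scheduler before a job runs.
 * Invokes the submission's own prepare_job callback if one is set; on
 * failure the job's work item is scheduled so the submission can be
 * cleaned up.
 */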
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_sched_entity *entity,
				    void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	if (sched_job->prepare_job) {
		r = sched_job->prepare_job(sched_job);
		if (r) {
			DRM_ERROR("Prepare job error\n");
			schedule_work(&sched_job->job_work);
		}
	}
	return r;
}

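/*
 * Fence callback: bounces completion of the hardware fence back into the
 * scheduler by handing the embedded amd_sched_job to amd_sched_process_job().
 */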
static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	amd_sched_process_job(sched_job);
}

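/*
 * Back-end "run" hook: schedules the job's IBs on the hardware ring under
 * the job lock, hooks amdgpu_fence_sched_cb onto the fence of the last IB,
 * runs the submission's optional run_job callback and records the emitted
 * sequence number on the entity. On any error the job's work item is
 * scheduled for cleanup.
 */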
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *entity,
				 struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_fence *fence;

	if (!job || !job->job) {
		DRM_ERROR("job is null\n");
		return;
	}
	sched_job = (struct amdgpu_cs_parser *)job->job;
	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
	if (r)
		goto err;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (fence_add_callback(&fence->base,
			       &job->cb, amdgpu_fence_sched_cb)) {
		DRM_ERROR("fence add callback failed\n");
		goto err;
	}

	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}

	amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);

	mutex_unlock(&sched_job->job_lock);
	return;
err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
}

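/*
 * Back-end "process" hook called by the scheduler once the job's hardware
 * fence has signalled; schedules the submission's work item, which performs
 * the actual cleanup (the derived ring/adev are currently unused).
 */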
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
	struct amdgpu_cs_parser *sched_job = NULL;
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_device *adev = NULL;

	if (!job)
		return;
	sched_job = (struct amdgpu_cs_parser *)job;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (!fence)
		return;
	ring = fence->ring;
	adev = ring->adev;

	schedule_work(&sched_job->job_work);
}

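/* Scheduler back-end callbacks amdgpu registers with the amd GPU scheduler. */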
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};

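/*
 * Helper for kernel-internal IB submission: when the scheduler is enabled,
 * wraps the IBs in a CS parser job, queues it on the kernel context's
 * per-ring entity and waits for the sequence to be emitted; otherwise the
 * IBs are scheduled on the hardware directly. On success *f points to the
 * fence of the last IB.
 */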
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;
	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
						ibs, 1);
		if (!sched_job)
			return -ENOMEM;
		sched_job->free_job = free_job;
		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
		ibs[num_ibs - 1].sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx.rings[ring->idx].entity,
				   sched_job);
		r = amd_sched_wait_emit(
			&adev->kernel_ctx.rings[ring->idx].entity,
			v_seq,
			false,
			-1);
		if (r)
			WARN(true, "emit timeout\n");
	} else
		r = amdgpu_ib_schedule(adev, 1, ibs, owner);
	if (r)
		return r;
	*f = &ibs[num_ibs - 1].fence->base;
	return 0;
}
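
/*
 * Illustrative use (a sketch, not code from this file): a caller submitting
 * a single kernel-owned IB and waiting on the resulting fence. free_job_fn
 * is a hypothetical cleanup callback supplied by the caller.
 *
 *	struct fence *f;
 *	int r;
 *
 *	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1,
 *						 free_job_fn,
 *						 AMDGPU_FENCE_OWNER_UNDEFINED,
 *						 &f);
 *	if (!r)
 *		fence_wait(f, false);
 */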