/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

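/* Return true if any engine of this vGPU still has a queued workload. */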
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

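/*
 * Switch to scheduler->next_vgpu in two phases: first tell the workload
 * dispatch thread to stop feeding the current vGPU, then complete the
 * switch only once every engine has drained its in-flight workload.
 */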
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	/* no target to schedule */
	if (!scheduler->next_vgpu)
		return;

	gvt_dbg_sched("try to schedule next vgpu %d\n",
			scheduler->next_vgpu->id);

	/*
	 * once this flag is set, the workload dispatch thread stops
	 * dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workloads? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i]) {
			gvt_dbg_sched("still have running workload\n");
			return;
		}
	}

	gvt_dbg_sched("switch to next vgpu %d\n",
			scheduler->next_vgpu->id);

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

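/*
 * Per-vGPU and per-device state for the time-based scheduler (TBS):
 * each vGPU hangs off the run queue via tbs_vgpu_data.list, and the
 * delayed work in tbs_sched_data drives the scheduling ticks.
 */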
struct tbs_vgpu_data {
	struct list_head list;
	struct intel_vgpu *vgpu;
	/* put some per-vgpu sched stats here */
};

struct tbs_sched_data {
	struct intel_gvt *gvt;
	struct delayed_work work;
	unsigned long period;
	struct list_head runq_head;
};

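/* Scheduling tick: the delayed work re-arms itself every millisecond. */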
#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))

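/*
 * Scheduler tick: scan the run queue round-robin, starting after the
 * current vGPU, and pick the first vGPU that has pending workloads.
 */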
static void tbs_sched_func(struct work_struct *work)
{
	struct tbs_sched_data *sched_data = container_of(work,
			struct tbs_sched_data, work.work);
	struct tbs_vgpu_data *vgpu_data;

	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	struct intel_vgpu *vgpu = NULL;
	struct list_head *pos, *head;

	mutex_lock(&gvt->lock);

	/* no vgpu on the run queue, or a target has already been chosen */
	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		goto out;

	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		head = &vgpu_data->list;
	} else {
		head = &sched_data->runq_head;
	}

	/* search for a vgpu with pending workloads */
	list_for_each(pos, head) {
		if (pos == &sched_data->runq_head)
			continue;

		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		vgpu = vgpu_data->vgpu;
		break;
	}

	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
	}
out:
	if (scheduler->next_vgpu) {
		gvt_dbg_sched("try to schedule next vgpu %d\n",
				scheduler->next_vgpu->id);
		try_to_schedule_next_vgpu(gvt);
	}

	/*
	 * there are still vgpus on the run queue, or the last schedule
	 * hasn't finished because of a running workload
	 */
	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		schedule_delayed_work(&sched_data->work, sched_data->period);

	mutex_unlock(&gvt->lock);
}

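/* Allocate the TBS state and set up, but do not yet arm, the tick work. */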
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct tbs_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->runq_head);
	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;
	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct tbs_sched_data *data = scheduler->sched_data;

	cancel_delayed_work(&data->work);
	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->list);

	vgpu->sched_data = data;
	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

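/*
 * Add or remove a vGPU from the run queue. Starting also kicks the tick
 * work immediately so the new vGPU is considered right away.
 */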
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->list))
		return;

	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
	schedule_delayed_work(&sched_data->work, 0);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->list);
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

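/*
 * Policy-agnostic entry points: the rest of GVT-g calls these wrappers,
 * which dispatch through sched_ops so an alternative policy could be
 * plugged in.
 */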
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}