/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

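/* Per-vGPU scheduling state, linked on the scheduler's LRU run queue. */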
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;

	ktime_t sched_in_time;
	ktime_t sched_out_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

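/* Global scheduler state: the timer driving scheduling and the LRU run queue. */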
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;

	struct list_head lru_runq_head;
};

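/* Charge the time the vGPU just spent on hardware against its timeslice budget. */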
static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;

	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;

	vgpu_data->sched_time += delta_ts;
	vgpu_data->left_ts -= delta_ts;
}

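/*
 * Timeslices are rebalanced every GVT_TS_BALANCE_PERIOD_MS. A balance cycle
 * consists of GVT_TS_BALANCE_STAGE_NUM stages; accumulated debt is dropped
 * and budgets are reallocated by weight at stage 0.
 */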
#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The accumulated timeslice is reset at stage 0: each vGPU's
	 * budget is allocated afresh, without adding previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		/* sum the weights of all vGPUs on the run queue */
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data,
						 lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		/* give each vGPU a weight-proportional share of the period */
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data,
						 lru_list);
			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
					 vgpu_data->sched_ctl.weight /
					 total_weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data,
						 lru_list);

			/* the timeslice for the next 100ms should add the
			 * left/debt slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

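/*
 * Perform the actual vGPU switch: stop dispatch for the current vGPU, bail
 * out while any engine still runs an uncompleted workload (the switch is
 * retried later), account the outgoing vGPU's timeslice, then promote
 * next_vgpu and wake the dispatch threads.
 */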
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		vgpu_data->sched_out_time = cur_time;
		vgpu_update_timeslice(scheduler->current_vgpu);
	}
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

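/* Scan the LRU run queue for a vGPU with pending workloads and timeslice left. */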
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

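/* default scheduling period: 1 ms, expressed in nanoseconds */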
#define GVT_DEFAULT_TIME_SLICE 1000000

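/*
 * Core of the time-based scheduling (TBS) policy: pick the next vGPU from
 * the LRU run queue, falling back to the idle vGPU when nobody is runnable.
 */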
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the last used vGPU to the tail of lru_list */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
			      &sched_data->lru_runq_head);
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

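/* Entry point invoked on each scheduling request; runs under the gvt lock. */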
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	static uint64_t timer_check;

	mutex_lock(&gvt->lock);

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
			gvt_balance_timeslice(sched_data);
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->lock);
}

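/* hrtimer callback: request a scheduling pass, then re-arm for the next period. */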
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

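/* Allocate global TBS state and set up (but do not start) the scheduling timer. */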
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

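/* Free per-vGPU state and release any engines this vGPU still owns. */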
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
	int ring_id;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
}

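/* Put the vGPU on the run queue and kick the scheduling timer if it is idle. */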
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
}

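/* The time-based scheduling policy, exposed through the generic policy ops. */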
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

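/* Exported scheduler entry points; these dispatch through the installed policy ops. */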
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}