/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
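/*
 * Time-based scheduler (TBS) policy: vGPUs that have pending workloads
 * sit on a run queue, and tbs_sched_func() rotates ownership of the
 * engines between them once per time slice.
 */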
/* Check whether any engine still has a queued workload for this vGPU. */
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}
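/*
 * Switch to scheduler->next_vgpu if one is pending: first flag
 * need_reschedule so the dispatch thread stops feeding the current
 * vGPU, then complete the switch only once every engine has drained
 * its in-flight workload.
 */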
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	/* no target to schedule */
	if (!scheduler->next_vgpu)
		return;

	gvt_dbg_sched("try to schedule next vgpu %d\n",
			scheduler->next_vgpu->id);

	/*
	 * after the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i]) {
			gvt_dbg_sched("still have running workload\n");
			return;
		}
	}

	gvt_dbg_sched("switch to next vgpu %d\n",
			scheduler->next_vgpu->id);

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}
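/* Per-vGPU and per-device private data for the TBS policy. */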
struct tbs_vgpu_data {
	struct list_head list;
	struct intel_vgpu *vgpu;
	/* put some per-vgpu sched stats here */
};

struct tbs_sched_data {
	struct intel_gvt *gvt;
	struct delayed_work work;
	unsigned long period;
	struct list_head runq_head;
};
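/* Default time slice: 1 ms, expressed in jiffies. */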
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
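/*
 * Delayed-work handler: pick the next vGPU with pending workloads,
 * scanning the run queue round-robin starting after the current vGPU,
 * then re-arm the timer for the next time slice while work remains.
 */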
static void tbs_sched_func(struct work_struct *work)
{
	struct tbs_sched_data *sched_data = container_of(work,
			struct tbs_sched_data, work.work);
	struct tbs_vgpu_data *vgpu_data;

	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	struct intel_vgpu *vgpu = NULL;
	struct list_head *pos, *head;

	mutex_lock(&gvt->lock);

	/* no vgpu on the runq, or a target has already been chosen */
	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		goto out;

	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		head = &vgpu_data->list;
	} else {
		head = &sched_data->runq_head;
	}

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {
		if (pos == &sched_data->runq_head)
			continue;

		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		vgpu = vgpu_data->vgpu;
		break;
	}

	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
	}
out:
	if (scheduler->next_vgpu) {
		gvt_dbg_sched("try to schedule next vgpu %d\n",
				scheduler->next_vgpu->id);
		try_to_schedule_next_vgpu(gvt);
	}

	/*
	 * still have vgpu on the runq,
	 * or the last schedule hasn't finished due to a running workload
	 */
	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		schedule_delayed_work(&sched_data->work, sched_data->period);

	mutex_unlock(&gvt->lock);
}
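/* Allocate the policy-wide data and set up the scheduling timer. */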
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct tbs_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->runq_head);
	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;
	return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct tbs_sched_data *data = scheduler->sched_data;

	cancel_delayed_work(&data->work);
	kfree(data);

	scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->list);

	vgpu->sched_data = data;
	return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}
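/* Put the vGPU on the run queue (if not already there) and kick the timer. */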
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	/* already on the run queue */
	if (!list_empty(&vgpu_data->list))
		return;

	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
	schedule_delayed_work(&sched_data->work, sched_data->period);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->list);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};
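/*
 * The exported helpers below dispatch through sched_ops, so an
 * alternative policy could be plugged in. A typical lifecycle
 * (a sketch; the actual callers live elsewhere in gvt):
 *
 *	intel_gvt_init_sched_policy(gvt);
 *	intel_vgpu_init_sched_policy(vgpu);
 *	intel_vgpu_start_schedule(vgpu);
 *	...workloads dispatched...
 *	intel_vgpu_stop_schedule(vgpu);
 *	intel_vgpu_clean_sched_policy(vgpu);
 *	intel_gvt_clean_sched_policy(gvt);
 */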
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}