/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of @gpa, relative to the vGPU's GTTMMADR (BAR0) base.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
        u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
                          ~GENMASK(3, 0);
        return gpa - gttmmio_gpa;
}

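/*
 * Classify an offset within the GTTMMADR BAR: reg_is_mmio() matches the
 * MMIO register block at the start of the BAR, reg_is_gtt() matches the
 * GGTT range that begins at gtt_start_offset.
 */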
#define reg_is_mmio(gvt, reg)  \
        (reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
        (reg >= gvt->device_info.gtt_start_offset \
         && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

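/*
 * Failsafe-mode emulation: once a vGPU has been marked failsafe, register
 * accesses are served by the default vreg handlers and GGTT accesses are
 * copied straight to/from the virtual page table, bypassing the normal
 * per-register handlers.
 */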
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes, bool read)
{
        struct intel_gvt *gvt = NULL;
        void *pt = NULL;
        unsigned int offset = 0;

        if (!vgpu || !p_data)
                return;

        gvt = vgpu->gvt;
        mutex_lock(&gvt->lock);
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
        if (reg_is_mmio(gvt, offset)) {
                if (read)
                        intel_vgpu_default_mmio_read(vgpu, offset, p_data,
                                        bytes);
                else
                        intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                        bytes);
        } else if (reg_is_gtt(gvt, offset) &&
                        vgpu->gtt.ggtt_mm->virtual_page_table) {
                offset -= gvt->device_info.gtt_start_offset;
                pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
                if (read)
                        memcpy(p_data, pt, bytes);
                else
                        memcpy(pt, p_data, bytes);

        } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                /* Since we enter the failsafe mode early during guest boot,
                 * guest may not have chance to set up its ppgtt table, so
                 * there should not be any wp pages for guest. Keep the wp
                 * related code here in case we need to handle it in future.
                 */
                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        /* remove write protection to prevent future traps */
                        intel_vgpu_clean_guest_page(vgpu, gp);
                        if (read)
                                intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                                p_data, bytes);
                        else
                                intel_gvt_hypervisor_write_gpa(vgpu, pa,
                                                p_data, bytes);
                }
        }
        mutex_unlock(&gvt->lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        int ret = -EINVAL;

        if (vgpu->failsafe) {
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
                return 0;
        }
        mutex_lock(&gvt->lock);

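        /*
         * A read that traps on a write-protected guest page (a guest page
         * table page being shadowed) is not an MMIO access at all; satisfy
         * it directly from guest memory via the hypervisor.
         */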
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                        p_data, bytes);
                        if (ret) {
                                gvt_vgpu_err("guest page read error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                                        ret, gp->gfn, pa, *(u32 *)p_data,
                                        bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

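        /*
         * Offsets inside the GGTT range go to the GTT emulation path; only
         * dword-aligned accesses of 4 or 8 bytes that stay within the range
         * are accepted.
         */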
        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

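        /*
         * An offset beyond the virtual MMIO register block is unexpected;
         * warn once and forward the access to guest memory.
         */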
        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

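        /*
         * Tracked registers are read through their registered handler;
         * untracked ones fall back to the default vreg read and are
         * reported until warnings are disabled for this vGPU.
         */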
        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
        } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

                if (!vgpu->mmio.disable_warn_untrack) {
                        gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
                                offset, bytes, *(u32 *)p_data);

                        if (offset == 0x206c) {
                                gvt_vgpu_err("------------------------------------------\n");
                                gvt_vgpu_err("likely triggers a gfx reset\n");
                                gvt_vgpu_err("------------------------------------------\n");
                                vgpu->mmio.disable_warn_untrack = true;
                        }
                }
        }

        if (ret)
                goto err;

        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
                        offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        u32 old_vreg = 0, old_sreg = 0;
        int ret = -EINVAL;

        if (vgpu->failsafe) {
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
                return 0;
        }

        mutex_lock(&gvt->lock);

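        /*
         * A write that traps on a write-protected guest page goes to that
         * page's registered handler (e.g. shadow page table tracking)
         * rather than to MMIO emulation.
         */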
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = gp->handler(gp, pa, p_data, bytes);
                        if (ret) {
                                gvt_err("guest page write error %d, "
                                        "gfn 0x%lx, pa 0x%llx, "
                                        "var 0x%x, len %d\n",
                                        ret, gp->gfn, pa,
                                        *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

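        /*
         * As on the read path, GGTT-range writes are handled by the GTT
         * emulation code and must be dword-aligned accesses of 4 or 8 bytes
         * within the range.
         */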
        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (!mmio && !vgpu->mmio.disable_warn_untrack)
                gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
                                vgpu->id, offset, bytes, *(u32 *)p_data);

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

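        /*
         * Tracked registers: read-only bits are preserved from the current
         * vreg value before the write handler runs, and for mode-control
         * registers the upper 16 bits act as a write mask so only bits whose
         * mask bit is set are allowed to change.
         */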
        if (mmio) {
                u64 ro_mask = mmio->ro_mask;

                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }

                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        old_vreg = vgpu_vreg(vgpu, offset);
                        old_sreg = vgpu_sreg(vgpu, offset);
                }

                if (!ro_mask) {
                        ret = mmio->write(vgpu, offset, p_data, bytes);
                } else {
                        /* Protect RO bits like HW */
                        u64 data = 0;

                        /* all register bits are RO. */
                        if (ro_mask == ~(u64)0) {
                                gvt_vgpu_err("try to write RO reg %x\n",
                                                offset);
                                ret = 0;
                                goto out;
                        }
                        /* keep the RO bits in the virtual register */
                        memcpy(&data, p_data, bytes);
                        data &= ~mmio->ro_mask;
                        data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
                        ret = mmio->write(vgpu, offset, &data, bytes);
                }

                /* higher 16 bits of mode ctl regs are mask bits for change */
                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        u32 mask = vgpu_vreg(vgpu, offset) >> 16;

                        vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                        | (vgpu_vreg(vgpu, offset) & mask);
                        vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                        | (vgpu_sreg(vgpu, offset) & mask);
                }
        } else
                ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                bytes);
        if (ret)
                goto err;
out:
        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
                        bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;

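        /*
         * Reload both the virtual (vreg) and shadow (sreg) register files
         * from the MMIO snapshot captured when the GVT firmware state was
         * loaded.
         */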
        memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
        memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

        vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

        /* set bits 0:2 (Core C-State) to C0 */
        vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

        vgpu->mmio.disable_warn_untrack = false;
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

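        /*
         * vreg and sreg are carved out of a single allocation twice the
         * MMIO size: vreg occupies the first half, sreg the second.
         */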
        vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
        if (!vgpu->mmio.vreg)
                return -ENOMEM;

        vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

        intel_vgpu_reset_mmio(vgpu);

        return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
        vfree(vgpu->mmio.vreg);
        vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}