/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Dexuan Cui
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_

/**
 * DOC: Hypervisor Service APIs for GVT-g Core Logic
 *
 * This is the glue layer between hypervisor-specific MPT modules and the
 * GVT-g core logic. Each kind of hypervisor MPT module provides a collection
 * of function callbacks and is attached to the GVT host when the driver is
 * loaded. The GVT-g core logic calls these APIs to request services from
 * the hypervisor.
 */

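/*
 * Illustrative sketch only: roughly how a hypervisor-specific MPT module
 * might supply its callback table. The "foo_*" names are hypothetical and
 * the callback signatures are inferred from how the wrappers below invoke
 * them through intel_gvt_host.mpt; the real table layout is defined by
 * struct intel_gvt_mpt elsewhere in GVT. Kept under "#if 0" because it is
 * an example, not part of this interface.
 */
#if 0
static int foo_host_init(struct device *dev, void *gvt, const void *ops)
{
	/* set up hypervisor-side state for the GVT host */
	return 0;
}

static int foo_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	/* deliver an MSI with the given address/data to the guest */
	return 0;
}

static const struct intel_gvt_mpt foo_mpt = {
	.host_init	= foo_host_init,
	.inject_msi	= foo_inject_msi,
	/* ... plus the other callbacks used by the wrappers below ... */
};
#endif
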
/**
 * intel_gvt_hypervisor_host_init - init GVT-g host side
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
		void *gvt, const void *ops)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->host_init)
		return 0;

	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}

/**
 * intel_gvt_hypervisor_host_exit - exit GVT-g host side
 */
static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
		void *gvt)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->host_exit)
		return;

	intel_gvt_host.mpt->host_exit(dev, gvt);
}

/**
 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
 * related state inside the hypervisor.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->attach_vgpu)
		return 0;

	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}

/**
 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
 * related state inside the hypervisor.
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->detach_vgpu)
		return;

	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}

#define MSI_CAP_CONTROL(offset) ((offset) + 2)
#define MSI_CAP_ADDRESS(offset) ((offset) + 4)
#define MSI_CAP_DATA(offset)    ((offset) + 8)
#define MSI_CAP_EN 0x1

/**
 * intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
{
	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
	u16 control, data;
	u32 addr;

	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));

	/* Do not generate an MSI if MSI enable (MSIEN) is cleared */
	if (!(control & MSI_CAP_EN))
		return 0;

	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
		return -EINVAL;

	trace_inject_msi(vgpu->id, addr, data);

	return intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
}

/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
	return intel_gvt_host.mpt->from_virt_to_mfn(p);
}

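/*
 * Illustrative sketch only: the typical reason to translate a host virtual
 * address into an MFN is to hand a host-allocated page to the hypervisor or
 * guest by frame number. The function name and the allocation are
 * hypothetical; only the wrapper call is real.
 */
static inline unsigned long example_host_page_mfn(void)
{
	void *va = (void *)get_zeroed_page(GFP_KERNEL);
	unsigned long mfn;

	if (!va)
		return INTEL_GVT_INVALID_ADDR;

	/* the caller must keep @va alive for as long as the mfn is in use */
	mfn = intel_gvt_hypervisor_virt_to_mfn(va);
	if (mfn == INTEL_GVT_INVALID_ADDR)
		free_page((unsigned long)va);

	return mfn;
}
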
/**
 * intel_gvt_hypervisor_enable_page_track - track a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_enable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_disable_page_track - untrack a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_disable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}

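/*
 * Illustrative sketch only (not part of the MPT interface): a typical
 * caller such as the GTT shadowing code first write-protects a guest
 * page-table page and then reads its contents on demand. The function
 * name and the single-entry read below are hypothetical; only the
 * wrappers it calls are real.
 */
static inline int example_track_and_read_guest_page(struct intel_vgpu *vgpu,
		unsigned long gfn)
{
	u64 first_entry;
	int ret;

	/* ask the hypervisor to write-protect the guest page */
	ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
	if (ret)
		return ret;

	/* read the first 8 bytes of the tracked page (gpa = gfn << PAGE_SHIFT) */
	ret = intel_gvt_hypervisor_read_gpa(vgpu, gfn << PAGE_SHIFT,
					    &first_entry, sizeof(first_entry));
	if (ret)
		intel_gvt_hypervisor_disable_page_track(vgpu, gfn);

	return ret;
}
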
/**
 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
 * @vgpu: a vGPU
 * @gfn: guest pfn
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_dma_map_guest_page - set up a DMA mapping for a guest page
 * @vgpu: a vGPU
 * @gfn: guest pfn
 * @size: page size
 * @dma_addr: buffer to receive the allocated dma address
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_dma_map_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
		dma_addr_t *dma_addr)
{
	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
						      dma_addr);
}

/**
 * intel_gvt_hypervisor_dma_unmap_guest_page - tear down a DMA mapping for a guest page
 * @vgpu: a vGPU
 * @dma_addr: the mapped dma addr
 */
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}

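/*
 * Illustrative sketch only: the compressed lifecycle of a guest-page DMA
 * mapping. In real callers the returned dma address lives in a shadow
 * entry and is only unmapped when that entry is destroyed; here the map
 * and unmap are shown back to back. The function name and the PAGE_SIZE
 * granularity are hypothetical.
 */
static inline int example_guest_page_dma_lifecycle(struct intel_vgpu *vgpu,
		unsigned long gfn)
{
	dma_addr_t dma_addr;
	int ret;

	/* pin the guest page and get a dma address usable in a shadow entry */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
						      &dma_addr);
	if (ret)
		return ret;

	/* ... the shadow entry would hold dma_addr while it is in use ... */

	/* drop the mapping once nothing references it any more */
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
	return 0;
}
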
/**
 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU
 * @gfn: guest PFN
 * @mfn: host PFN
 * @nr: number of PFNs
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long mfn, unsigned int nr,
		bool map)
{
	/* an MPT implementation could have MMIO mapped elsewhere */
	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
		return 0;

	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
						  map);
}

/**
 * intel_gvt_hypervisor_set_trap_area - trap a guest PA region
 * @vgpu: a vGPU
 * @start: the beginning of the guest physical address region
 * @end: the end of the guest physical address region
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_trap_area(
		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
	/* an MPT implementation could have MMIO trapped elsewhere */
	if (!intel_gvt_host.mpt->set_trap_area)
		return 0;

	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}

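/*
 * Illustrative sketch only: how a BAR-programming path might combine the
 * two wrappers above, mapping a guest aperture range straight onto host
 * pages while asking the hypervisor to trap a register region so accesses
 * reach GVT-g's MMIO emulation. The function name, the inclusive @end
 * convention and all parameters are hypothetical.
 */
static inline int example_expose_vgpu_bars(struct intel_vgpu *vgpu,
		unsigned long aperture_gfn, unsigned long aperture_mfn,
		unsigned int aperture_pages, u64 mmio_start, u64 mmio_size)
{
	int ret;

	/* let the guest touch the aperture pages without trapping */
	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, aperture_gfn,
						  aperture_mfn, aperture_pages,
						  true);
	if (ret)
		return ret;

	/* trap the register BAR so reads and writes are emulated */
	return intel_gvt_hypervisor_set_trap_area(vgpu, mmio_start,
						  mmio_start + mmio_size - 1,
						  true);
}
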
/**
 * intel_gvt_hypervisor_set_opregion - Set opregion for guest
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->set_opregion)
		return 0;

	return intel_gvt_host.mpt->set_opregion(vgpu);
}

/**
 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->get_vfio_device)
		return 0;

	return intel_gvt_host.mpt->get_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->put_vfio_device)
		return;

	intel_gvt_host.mpt->put_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is visible to the guest
 * @vgpu: a vGPU
 * @gfn: guest PFN
 *
 * Returns:
 * True if the gfn is valid, false if not.
 */
static inline bool intel_gvt_hypervisor_is_valid_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	if (!intel_gvt_host.mpt->is_valid_gfn)
		return true;

	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}

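/*
 * Illustrative sketch only: validating a guest pfn before translating it,
 * returning the failure value referenced in the comments above when the
 * pfn is not visible to the guest. The function name is hypothetical.
 */
static inline unsigned long example_checked_gfn_to_mfn(struct intel_vgpu *vgpu,
		unsigned long gfn)
{
	/* refuse to translate pfns the hypervisor says the guest cannot see */
	if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn))
		return INTEL_GVT_INVALID_ADDR;

	return intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
}
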
#endif /* _GVT_MPT_H_ */