/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 3,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};
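
/*
 * Usage sketch (illustrative, not part of this file): TTM consumes these
 * placement tables when validating a buffer object. The exact
 * ttm_bo_validate() signature varies between kernel versions, but a call
 * along the lines of
 *
 *	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
 *
 * asks TTM to try the entries in @placement in order, falling back to
 * @busy_placement when the preferred placements are contended.
 */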

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

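/*
 * Exported (declared elsewhere in the driver, assumed to be vmwgfx_drv.h)
 * so that buffer-object setup code can account for the per-TT bookkeeping
 * overhead up front.
 */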
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize.
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from.
 * @p_offset: Page offset into @vsgt at which to start iterating.
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
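
/*
 * Usage sketch: because of the __sg_page_iter_start convention above, the
 * iterator must be advanced once before the first dereference, which makes
 * the canonical loop (see vmw_ttm_map_dma() below)
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */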

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}
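
/*
 * For reference only (this driver never does this): under the full DMA API,
 * CPU access to the mapped pages would have to be bracketed with
 *
 *	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 *	... CPU reads/writes ...
 *	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 *
 * vmwgfx can omit the bracket only on platforms where these calls are known
 * to be no-ops or at most a write-buffer flush, as noted above.
 */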

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the chosen DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

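	/*
	 * Count contiguous DMA regions. For example, pages mapped at
	 * 0x1000, 0x2000 and 0x5000 (with 4 KiB pages) yield two regions:
	 * [0x1000, 0x2fff] and [0x5000, 0x5fff].
	 */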
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;

	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
			    ttm->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);
	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;

out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on both the
		 * number of slots and the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = NULL,
	.swap_notify = NULL,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
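
/*
 * A hedged sketch of how this driver table is consumed: device init in
 * vmwgfx_drv.c registers it with TTM via something like the call below
 * (the @glob argument name is illustrative, and the ttm_bo_device_init()
 * signature differs between kernel versions):
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, glob, &vmw_bo_driver,
 *				 VMWGFX_FILE_PAGE_OFFSET, false);
 */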