/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = to_qxl_bo(tbo);
        qdev = to_qxl(bo->tbo.base.dev);

        qxl_surface_evict(qdev, bo, false);
        WARN_ON_ONCE(bo->map_count > 0);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->tbo.base);
        kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &qxl_ttm_bo_destroy;
}

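/*
 * Translate a QXL GEM domain into the TTM placement list used by
 * ttm_bo_validate(). QXL_GEM_DOMAIN_VRAM maps to TTM_PL_VRAM,
 * QXL_GEM_DOMAIN_SURFACE prefers TTM_PL_PRIV (the surface BAR) with
 * TTM_PL_VRAM as a second choice, and QXL_GEM_DOMAIN_CPU (or any
 * unrecognized domain) falls back to TTM_PL_SYSTEM. Objects of at
 * most one page are placed top-down (TTM_PL_FLAG_TOPDOWN).
 */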
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
        u32 c = 0;
        u32 pflag = 0;
        unsigned int i;

        if (qbo->tbo.base.size <= PAGE_SIZE)
                pflag |= TTM_PL_FLAG_TOPDOWN;

        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM) {
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_SURFACE) {
                qbo->placements[c].mem_type = TTM_PL_PRIV;
                qbo->placements[c++].flags = pflag;
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_CPU) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = pflag;
        }
        if (!c) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = 0;
        }
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
        for (i = 0; i < c; ++i) {
                qbo->placements[i].fpfn = 0;
                qbo->placements[i].lpfn = 0;
        }
}

static const struct drm_gem_object_funcs qxl_object_funcs = {
        .free = qxl_gem_object_free,
        .open = qxl_gem_object_open,
        .close = qxl_gem_object_close,
        .pin = qxl_gem_prime_pin,
        .unpin = qxl_gem_prime_unpin,
        .get_sg_table = qxl_gem_prime_get_sg_table,
        .vmap = qxl_gem_prime_vmap,
        .vunmap = qxl_gem_prime_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};

int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
                  bool kernel, bool pinned, u32 domain, u32 priority,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = { !kernel, false };
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->tbo.base.funcs = &qxl_object_funcs;
        bo->type = domain;
        bo->surface_id = 0;
        INIT_LIST_HEAD(&bo->list);

        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain);

        bo->tbo.priority = priority;
        r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, 0, &ctx, size,
                                 NULL, NULL, &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->ddev.dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        if (pinned)
                ttm_bo_pin(&bo->tbo);
        ttm_bo_unreserve(&bo->tbo);
        *bo_ptr = bo;
        return 0;
}
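
/*
 * Example (sketch, not part of the driver): creating a page-sized,
 * pinned VRAM object for kernel-internal use. Error handling is
 * trimmed and "qdev" is assumed to be a valid struct qxl_device:
 *
 *      struct qxl_bo *bo;
 *      int r;
 *
 *      r = qxl_bo_create(qdev, PAGE_SIZE, true, true,
 *                        QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *      if (r)
 *              return r;
 *      ... use the object ...
 *      qxl_bo_unref(&bo);
 */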

int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map)
{
        int r;

        if (bo->kptr) {
                bo->map_count++;
                goto out;
        }
        r = ttm_bo_vmap(&bo->tbo, &bo->map);
        if (r)
                return r;
        bo->map_count = 1;

        /* TODO: Remove kptr in favor of map everywhere. */
        if (bo->map.is_iomem)
                bo->kptr = (void *)bo->map.vaddr_iomem;
        else
                bo->kptr = bo->map.vaddr;

out:
        *map = bo->map;
        return 0;
}

void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        unsigned long offset;
        void *rptr;
        int ret;
        struct io_mapping *map;
        struct dma_buf_map bo_map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;

        offset = bo->tbo.mem.start << PAGE_SHIFT;
        return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_kmap(bo, &bo_map);
        if (ret)
                return NULL;
        rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}

void qxl_bo_kunmap(struct qxl_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->map_count--;
        if (bo->map_count > 0)
                return;
        bo->kptr = NULL;
        ttm_bo_vunmap(&bo->tbo, &bo->map);
}
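
/*
 * Example (sketch): mapping a BO into the kernel, touching it, and
 * dropping the mapping again. qxl_bo_kmap() reference-counts the
 * mapping via map_count, so every successful call must be balanced
 * by qxl_bo_kunmap():
 *
 *      struct dma_buf_map map;
 *      int r;
 *
 *      r = qxl_bo_kmap(bo, &map);
 *      if (r)
 *              return r;
 *      if (map.is_iomem)
 *              memset_io(map.vaddr_iomem, 0, bo->tbo.base.size);
 *      else
 *              memset(map.vaddr, 0, bo->tbo.base.size);
 *      qxl_bo_kunmap(bo);
 */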

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
            (bo->tbo.mem.mem_type != TTM_PL_PRIV))
                goto fallback;

        io_mapping_unmap_atomic(pmap);
        return;
fallback:
        qxl_bo_kunmap(bo);
}
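
/*
 * Example (sketch): the atomic page map/unmap pair. When the object
 * lives in VRAM or the surface BAR, the mapping comes from
 * io_mapping_map_atomic_wc() and is atomic, so the caller must not
 * sleep before releasing it:
 *
 *      void *ptr = qxl_bo_kmap_atomic_page(qdev, bo, 0);
 *
 *      if (ptr) {
 *              ... touch at most one page at ptr ...
 *              qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
 *      }
 */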

void qxl_bo_unref(struct qxl_bo **bo)
{
        if ((*bo) == NULL)
                return;

        drm_gem_object_put(&(*bo)->tbo.base);
        *bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        drm_gem_object_get(&bo->tbo.base);
        return bo;
}

static int __qxl_bo_pin(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct drm_device *ddev = bo->tbo.base.dev;
        int r;

        if (bo->tbo.pin_count) {
                ttm_bo_pin(&bo->tbo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, bo->type);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0))
                ttm_bo_pin(&bo->tbo);
        if (unlikely(r != 0))
                dev_err(ddev->dev, "%p pin failed\n", bo);
        return r;
}

static void __qxl_bo_unpin(struct qxl_bo *bo)
{
        ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning it. If the BO is already reserved,
 * call the internal helper __qxl_bo_pin() directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = __qxl_bo_pin(bo);
        qxl_bo_unreserve(bo);
        return r;
}
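
/*
 * Example (sketch): pinning an object around a hardware operation so
 * TTM cannot move it in the meantime, then dropping the pin:
 *
 *      int r = qxl_bo_pin(bo);
 *
 *      if (r)
 *              return r;
 *      ... issue commands that reference the object ...
 *      qxl_bo_unpin(bo);
 */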

/*
 * Reserve the BO before unpinning it. If the BO is already reserved,
 * call the internal helper __qxl_bo_unpin() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
        return 0;
}

void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
                        &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
                        *((unsigned long *)&bo->tbo.base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* This should drop the last reference to the TTM BO. */
                drm_gem_object_put(&bo->tbo.base);
        }
}

int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}

int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* Allocate a surface id for this surface now. */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo);
                if (ret)
                        return ret;
        }
        return 0;
}

int qxl_surf_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}