/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */
#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"
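/*
 * Final TTM destructor: runs once the last reference to the underlying
 * ttm_buffer_object is dropped.  It evicts any hardware surface backing
 * the BO, removes it from the device's GEM object list and releases the
 * embedded GEM object before freeing the qxl_bo itself.
 */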
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}
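/*
 * Translate a qxl GEM domain into a TTM placement list.  BOs of a
 * single page or less get TTM_PL_FLAG_TOPDOWN, presumably so small
 * allocations do not fragment the bottom of the address range.  The
 * surface domain also lists VRAM as a fallback placement, and a domain
 * that matches nothing degrades to plain system memory.
 */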
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};
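/*
 * Create a new qxl BO of @size bytes (rounded up to a whole page) in
 * the given placement @domain.  @kernel selects a kernel-internal TTM
 * object that userspace cannot map; @pinned leaves the freshly created
 * BO pinned in place.  On success the BO is returned through @bo_ptr
 * with its reservation already released.
 */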
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, 0, &ctx, size,
				 NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}
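/*
 * Map the whole BO into the kernel's address space.  The mapping is
 * reference counted through map_count, so nested qxl_bo_kmap() /
 * qxl_bo_kunmap() pairs reuse the existing vmap.
 */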
int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map)
{
	int r;

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}
	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r)
		return r;
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */

	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}
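/*
 * Map a single page of the BO for short-lived CPU access.  For BOs
 * living in VRAM or the surface region this takes an atomic io_mapping
 * of just that page; anything else falls back to the full vmap set up
 * by qxl_bo_kmap().
 */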
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct dma_buf_map bo_map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.mem.start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
}
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_kunmap(bo);
}
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}
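/*
 * Pin helper that expects the BO to already be reserved by the caller.
 * An already-pinned BO only gets its pin count bumped; otherwise the
 * BO is first validated into its preferred placement.
 */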
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}
static void __qxl_bo_unpin(struct qxl_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
}
/*
 * Reserves the BO before pinning the object.  If the BO is already
 * reserved by the caller, use the internal version __qxl_bo_pin()
 * directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}
/*
 * Reserves the BO before unpinning the object.  If the BO is already
 * reserved by the caller, use the internal version __qxl_bo_unpin()
 * directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	__qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return 0;
}
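/*
 * Last-resort cleanup for driver teardown: if userspace still holds
 * GEM objects at this point, log each one and drop the reference on
 * its behalf so the BOs do not leak.
 */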
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}
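/*
 * Lazily bind a surface BO to the hardware: on first use, allocate a
 * surface id and create the hardware surface for it.  BOs that are not
 * surfaces, or that already have an id, pass through unchanged.
 */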
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}
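/*
 * The two helpers below ask TTM to evict every BO from the surface
 * (TTM_PL_PRIV) and VRAM managers respectively, e.g. so the memory can
 * be handed back before the device is suspended or torn down.
 */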
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}