/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	/* drop the initial reference; the handle now owns the object */
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	/* bytes per line, rounding bpp up to a whole number of bytes */
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}
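
/*
 * Typical userspace flow for the dumb-buffer path above (illustrative
 * sketch, not part of this driver): allocate with
 * DRM_IOCTL_MODE_CREATE_DUMB, request a fake mmap offset with
 * DRM_IOCTL_MODE_MAP_DUMB (serviced by udl_gem_mmap() below), then
 * mmap() the DRM fd at that offset:
 *
 *	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	void *map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, mreq.offset);
 */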

int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * drm_gem_mmap() sets up a PFN-based mapping; switch to a
	 * mixed map so the fault handler can use vm_insert_page().
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall through */
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int udl_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	/* imported pages belong to the dma-buf exporter; just drop our array */
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't use GEM's direct mmap; userspace instead
 * mmap()s the DRM fd at the fake offset returned here, as with other
 * drivers.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;	/* drop the reference and unlock, don't return directly */
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("failed to allocate page array for %d pages\n", npages);
		/* don't leak the object we just created */
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
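
/*
 * For reference (illustrative sketch, not part of this driver):
 * userspace hands a dma-buf fd to this import path via the generic
 * PRIME ioctl, which the DRM core routes to udl_gem_prime_import()
 * above:
 *
 *	struct drm_prime_handle req = { .fd = dmabuf_fd };
 *	ioctl(udl_drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
 *	// req.handle is now a GEM handle backed by the imported buffer
 */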