/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

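/*
 * Per-buffer bookkeeping for this allocator: sg_desc describes the pages
 * to drivers (returned through the cookie op), pages backs both the
 * optional kernel mapping and mmap to userspace, and refcount/handler
 * let the common vm_area helpers track mappings of MMAP buffers.
 */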
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct vb2_dma_sg_desc		sg_desc;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);

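/*
 * @alloc: allocate an MMAP buffer as individual pages and describe them
 * with a one-entry-per-page scatterlist; the pages do not have to be
 * physically contiguous.
 */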
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
	       sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL);
		if (NULL == buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);
	return buf;

fail_pages_alloc:
	while (--i >= 0)
		__free_page(buf->pages[i]);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

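/*
 * @put: drop one reference to an MMAP buffer; the last put unmaps the
 * kernel mapping and frees the pages, the page array and the scatterlist.
 */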
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
		       buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

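/*
 * @get_userptr: pin the user pages spanning [vaddr, vaddr + size) with
 * get_user_pages() and build a scatterlist over them; the first entry
 * carries the sub-page offset so the list covers exactly the user range.
 */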
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vmalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	memset(buf->sg_desc.sglist, 0,
	       buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	down_read(&current->mm->mmap_sem);
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);
	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
	       num_pages_from_user, buf->sg_desc.num_pages);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}

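/*
 * @vaddr: return a kernel virtual address for the buffer, creating the
 * vm_map_ram() mapping on first use.
 */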
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

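/*
 * @num_users: how many references (the allocation itself plus any live
 * userspace mappings) still point at the buffer.
 */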
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

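/*
 * @mmap: insert the buffer's pages one by one into the user VMA and
 * install vb2_common_vm_ops so the refcount follows the mapping.
 */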
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	/* Refuse to map more pages than the buffer actually holds */
	if (usize > buf->sg_desc.num_pages * PAGE_SIZE) {
		printk(KERN_ERR "Requested mapping is too big\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

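/*
 * @cookie: hand out the vb2_dma_sg_desc so the driver can dma_map_sg()
 * the buffer for its device.
 */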
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_desc;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

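/*
 * Typical use, as a minimal sketch (the queue setup context and the
 * "dev" pointer below are hypothetical, not part of this file): a driver
 * selects this allocator by pointing its vb2_queue at vb2_dma_sg_memops
 * and then DMA-maps the scatterlist it gets back for each plane:
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	...
 *	struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, 0);
 *	int nents = dma_map_sg(dev, desc->sglist, desc->num_pages,
 *			       DMA_FROM_DEVICE);
 */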
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");