]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/media/v4l2-core/videobuf2-dma-sg.c
[media] videobuf2: add gfp_flags
[mirror_ubuntu-artful-kernel.git] / drivers / media / v4l2-core / videobuf2-dma-sg.c
CommitLineData
5ba3f757
AP
1/*
2 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/mm.h>
15#include <linux/scatterlist.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19
20#include <media/videobuf2-core.h>
21#include <media/videobuf2-memops.h>
22#include <media/videobuf2-dma-sg.h>
23
/*
 * Per-buffer state for the videobuf2 scatter/gather memory allocator.
 * One instance backs either a kernel-allocated (MMAP) buffer or a pinned
 * userspace (USERPTR) buffer.
 */
struct vb2_dma_sg_buf {
	void *vaddr;		/* lazy kernel mapping, set by vb2_dma_sg_vaddr() */
	struct page **pages;	/* array of sg_desc.num_pages page pointers */
	int write;		/* non-zero if userptr pages were pinned writable */
	int offset;		/* sub-page offset of a non-page-aligned userptr */
	struct vb2_dma_sg_desc sg_desc;	/* sglist + size handed out via cookie() */
	atomic_t refcount;	/* users; freed when it drops to zero */
	struct vb2_vmarea_handler handler;	/* refcounting hooks for mmap'ed vmas */
};
33
34static void vb2_dma_sg_put(void *buf_priv);
35
b6ba2057 36static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
5ba3f757
AP
37{
38 struct vb2_dma_sg_buf *buf;
39 int i;
40
41 buf = kzalloc(sizeof *buf, GFP_KERNEL);
42 if (!buf)
43 return NULL;
44
45 buf->vaddr = NULL;
46 buf->write = 0;
47 buf->offset = 0;
48 buf->sg_desc.size = size;
49 buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
50
fabc6b85 51 buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
5ba3f757
AP
52 sizeof(*buf->sg_desc.sglist));
53 if (!buf->sg_desc.sglist)
54 goto fail_sglist_alloc;
5ba3f757
AP
55 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
56
57 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
58 GFP_KERNEL);
59 if (!buf->pages)
60 goto fail_pages_array_alloc;
61
62 for (i = 0; i < buf->sg_desc.num_pages; ++i) {
b6ba2057
HV
63 buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
64 __GFP_NOWARN | gfp_flags);
5ba3f757
AP
65 if (NULL == buf->pages[i])
66 goto fail_pages_alloc;
67 sg_set_page(&buf->sg_desc.sglist[i],
68 buf->pages[i], PAGE_SIZE, 0);
69 }
70
71 buf->handler.refcount = &buf->refcount;
72 buf->handler.put = vb2_dma_sg_put;
73 buf->handler.arg = buf;
74
75 atomic_inc(&buf->refcount);
76
77 printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
78 __func__, buf->sg_desc.num_pages);
5ba3f757
AP
79 return buf;
80
81fail_pages_alloc:
82 while (--i >= 0)
83 __free_page(buf->pages[i]);
a9bb36aa 84 kfree(buf->pages);
5ba3f757
AP
85
86fail_pages_array_alloc:
87 vfree(buf->sg_desc.sglist);
88
89fail_sglist_alloc:
90 kfree(buf);
91 return NULL;
92}
93
94static void vb2_dma_sg_put(void *buf_priv)
95{
96 struct vb2_dma_sg_buf *buf = buf_priv;
97 int i = buf->sg_desc.num_pages;
98
99 if (atomic_dec_and_test(&buf->refcount)) {
100 printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
101 buf->sg_desc.num_pages);
102 if (buf->vaddr)
103 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
104 vfree(buf->sg_desc.sglist);
105 while (--i >= 0)
106 __free_page(buf->pages[i]);
107 kfree(buf->pages);
108 kfree(buf);
109 }
110}
111
/*
 * Pin a userspace buffer of @size bytes starting at @vaddr and build a
 * scatterlist over its pages. @write selects whether the pages are pinned
 * for writing. Returns an opaque buffer handle, or NULL on failure.
 * Released by vb2_dma_sg_put_userptr().
 */
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	/* Remember the sub-page offset so vaddr() can return an exact pointer. */
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	/* Page span covering [vaddr, vaddr + size), inclusive of partial pages. */
	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vzalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	/* Pin the user pages (legacy get_user_pages() signature; force = 1). */
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	/* A short pin is treated as failure; partial pins are released below. */
	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	/* First entry may start mid-page; remaining entries are page-sized
	 * except for a possibly short tail. */
	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

	/* Unwind in reverse order of acquisition. */
userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
	       num_pages_from_user, buf->sg_desc.num_pages);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
179
180/*
181 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
182 * be used
183 */
184static void vb2_dma_sg_put_userptr(void *buf_priv)
185{
186 struct vb2_dma_sg_buf *buf = buf_priv;
187 int i = buf->sg_desc.num_pages;
188
189 printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
190 __func__, buf->sg_desc.num_pages);
191 if (buf->vaddr)
192 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
193 while (--i >= 0) {
194 if (buf->write)
195 set_page_dirty_lock(buf->pages[i]);
196 put_page(buf->pages[i]);
197 }
198 vfree(buf->sg_desc.sglist);
199 kfree(buf->pages);
200 kfree(buf);
201}
202
203static void *vb2_dma_sg_vaddr(void *buf_priv)
204{
205 struct vb2_dma_sg_buf *buf = buf_priv;
206
207 BUG_ON(!buf);
208
209 if (!buf->vaddr)
210 buf->vaddr = vm_map_ram(buf->pages,
211 buf->sg_desc.num_pages,
212 -1,
213 PAGE_KERNEL);
214
215 /* add offset in case userptr is not page-aligned */
216 return buf->vaddr + buf->offset;
217}
218
219static unsigned int vb2_dma_sg_num_users(void *buf_priv)
220{
221 struct vb2_dma_sg_buf *buf = buf_priv;
222
223 return atomic_read(&buf->refcount);
224}
225
226static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
227{
228 struct vb2_dma_sg_buf *buf = buf_priv;
229 unsigned long uaddr = vma->vm_start;
230 unsigned long usize = vma->vm_end - vma->vm_start;
231 int i = 0;
232
233 if (!buf) {
234 printk(KERN_ERR "No memory to map\n");
235 return -EINVAL;
236 }
237
238 do {
239 int ret;
240
241 ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
242 if (ret) {
243 printk(KERN_ERR "Remapping memory, error: %d\n", ret);
244 return ret;
245 }
246
247 uaddr += PAGE_SIZE;
248 usize -= PAGE_SIZE;
249 } while (usize > 0);
250
251
252 /*
253 * Use common vm_area operations to track buffer refcount.
254 */
255 vma->vm_private_data = &buf->handler;
256 vma->vm_ops = &vb2_common_vm_ops;
257
258 vma->vm_ops->open(vma);
259
260 return 0;
261}
262
263static void *vb2_dma_sg_cookie(void *buf_priv)
264{
265 struct vb2_dma_sg_buf *buf = buf_priv;
266
267 return &buf->sg_desc;
268}
269
/* Memory-operations table plugged into the videobuf2 core. */
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");