/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
	} while (0)

struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	struct frame_vector *vec;
	int offset;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table *dma_sgt;
	size_t size;
	unsigned int num_pages;
	refcount_t refcount;
	struct vb2_vmarea_handler handler;

	struct dma_buf_attachment *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

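/*
 * Allocate the buffer as a set of physically contiguous chunks: try the
 * largest page order that still fits the remaining size, fall back to lower
 * orders on failure, then split each compound allocation so that buf->pages[]
 * ends up holding individual order-0 page pointers.
 */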
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

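/*
 * MMAP allocation: allocate the pages, describe them with an sg_table and
 * map that table for DMA. The CPU cache sync is skipped here and deferred
 * to the prepare()/finish() memops.
 */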
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size, enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

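/*
 * Drop one reference; on the last put, unmap the buffer, free its pages and
 * release the device reference taken at allocation time.
 */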
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

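/*
 * prepare()/finish() hand the buffer over to the device and back to the CPU
 * by syncing the scatterlist; for DMABUF-attached buffers the exporter is
 * responsible for cache maintenance, so both are no-ops there.
 */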
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

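/*
 * USERPTR support: pin the user pages with a frame vector, build an sg_table
 * over them and map it for DMA, again deferring the CPU sync to prepare().
 */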
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

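/*
 * Create a kernel mapping on first use: vmap the dma-buf for DMABUF buffers,
 * otherwise map the page array with vm_map_ram().
 */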
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

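/* Map the buffer into userspace by inserting its pages into the VMA one by one. */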
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.map = vb2_dma_sg_dmabuf_ops_kmap,
	.map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

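/* Pin the attached dma-buf and take over its scatterlist for DMA. */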
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

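/*
 * Attach to an imported dma-buf; the actual mapping is done later in
 * map_dmabuf() when the buffer is first used.
 */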
static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");