/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

/*
 * vb2_dc_get_contiguous_size - return the length of the initial
 * DMA-contiguous run of the mapped scatterlist @sgt, used to check
 * whether the whole buffer can be addressed as a single chunk.
 */
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

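/*
 * Illustrative note (not from the original file): if an IOMMU mapped a
 * two-entry table back-to-back, e.g. entry 0 at dma_addr 0x10000 with
 * len 0x1000 and entry 1 at 0x11000 with len 0x1000, the walk above
 * returns 0x2000; a hole before entry 1 would end the run at 0x1000.
 */
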
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	/* imported DMABUFs are vmapped lazily, on first use */
	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

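/*
 * Hedged usage sketch (not part of the original file): userspace maps an
 * MMAP-mode buffer with the offset reported by VIDIOC_QUERYBUF; the vb2
 * core resolves that offset to this buffer before the callback above
 * runs, which is why vm_pgoff can simply be reset to zero here.
 *
 *	struct v4l2_buffer b = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index = 0,
 *	};
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	void *p = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, b.m.offset);
 */
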
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.map_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

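/*
 * Hedged usage sketch (not part of the original file): this exporter is
 * reached from userspace through VIDIOC_EXPBUF, which turns an MMAP
 * buffer into a dmabuf file descriptor that other devices can import:
 *
 *	struct v4l2_exportbuffer eb = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.flags = O_CLOEXEC | O_RDWR,
 *	};
 *	ioctl(fd, VIDIOC_EXPBUF, &eb);
 *
 * On success, eb.fd refers to the exported dmabuf.
 */
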
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * the pfn to a dma address, or as a last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

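/*
 * Hedged usage sketch (not part of the original file): a driver opts into
 * this allocator by pointing its queue at the exported memops before
 * vb2_queue_init(). "my_setup_queue" is an illustrative name, and the
 * remaining mandatory queue fields (q->ops, q->buf_struct_size, locking)
 * are assumed to be filled in elsewhere.
 *
 *	static int my_setup_queue(struct vb2_queue *q, struct device *dev)
 *	{
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *		q->mem_ops = &vb2_dma_contig_memops;
 *		q->dev = dev;
 *		return vb2_queue_init(q);
 *	}
 */
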
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
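
/*
 * Hedged usage sketch (not part of the original file): the two helpers
 * above pair across probe and remove. "my_probe" and "my_remove" are
 * illustrative names, DMA_BIT_MASK(32) is one common choice of ceiling,
 * and the remaining probe/remove steps are omitted.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */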

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");