// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;

	if (shm->flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		size_t n;
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		for (n = 0; n < shm->num_pages; n++)
			put_page(shm->pages[n]);

		kfree(shm->pages);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.mmap = tee_shm_op_mmap,
};

struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_rem:
	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	return shm;
err:
	if (shm) {
		size_t n;

		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		if (shm->pages) {
			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
			kfree(shm->pages);
		}
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}

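/*
 * Example (hypothetical caller, not part of this file): handing the
 * dma-buf out to user space. On success the returned fd holds the
 * reference taken above, so the caller must not drop it again:
 *
 *	int fd = tee_shm_get_fd(shm);
 *
 *	if (fd < 0)
 *		return fd;
 */
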
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);