/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "optee_private.h"

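/*
 * A request waiting to be picked up and answered by the supplicant.
 * It sits on the optee_supp reqs list (in_queue == true) until the
 * supplicant retrieves it, is then tracked in the optee_supp idr, and
 * the requesting thread waits on the completion c until the result
 * (or an abort) is delivered in ret.
 */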
struct optee_supp_req {
	struct list_head link;

	bool in_queue;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};

void optee_supp_init(struct optee_supp *supp)
{
	memset(supp, 0, sizeof(*supp));
	mutex_init(&supp->mutex);
	init_completion(&supp->reqs_c);
	idr_init(&supp->idr);
	INIT_LIST_HEAD(&supp->reqs);
	supp->req_id = -1;
}

void optee_supp_uninit(struct optee_supp *supp)
{
	mutex_destroy(&supp->mutex);
	idr_destroy(&supp->idr);
}

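/*
 * Abort every outstanding request when the supplicant goes away, so that
 * threads blocked in optee_supp_thrd_req() are woken with
 * TEEC_ERROR_COMMUNICATION instead of waiting forever.
 */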
void optee_supp_release(struct optee_supp *supp)
{
	int id;
	struct optee_supp_req *req;
	struct optee_supp_req *req_tmp;

	mutex_lock(&supp->mutex);

	/* Abort all requests retrieved by supplicant */
	idr_for_each_entry(&supp->idr, req, id) {
		idr_remove(&supp->idr, id);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Abort all queued requests */
	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
		list_del(&req->link);
		req->in_queue = false;
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	supp->ctx = NULL;
	supp->req_id = -1;

	mutex_unlock(&supp->mutex);
}

/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	bool interruptable;
	u32 ret;

	/*
	 * Return in case there is no supplicant available and
	 * non-blocking request.
	 */
	if (!supp->ctx && ctx->supp_nowait)
		return TEEC_ERROR_COMMUNICATION;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	req->in_queue = true;
	mutex_unlock(&supp->mutex);

	/* Tell an eventual waiter there's a new request */
	complete(&supp->reqs_c);

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from wait_for_completion(&req->c) successfully we have
	 * exclusive access again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->mutex currently is held none can
			 * become available until the mutex is released
			 * again.
			 *
			 * Interrupting an RPC to supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting them wouldn't make sense.
			 */
			if (req->in_queue) {
				list_del(&req->link);
				req->in_queue = false;
			}
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}

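/*
 * Pop the oldest queued request (if any) and allocate an id for it in
 * supp->idr so the answer from the supplicant can be matched up later by
 * supp_pop_req(). Returns NULL when the queue is empty and an ERR_PTR()
 * on error. Called with supp->mutex held.
 */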
static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&supp->reqs))
		return NULL;

	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->in_queue = false;

	return req;
}

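/*
 * Sanity check the parameter array supplied by the supplicant: drop the
 * memref references taken earlier, accept only
 * TEE_IOCTL_PARAM_ATTR_TYPE_NONE attributes (with or without the META
 * bit) and report whether a meta parameter is present.
 */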
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t n;

	if (!num_params)
		return -EINVAL;

	/*
	 * If there are memrefs we need to decrease those as they were
	 * increased earlier and we'll even refuse to accept any below.
	 */
	for (n = 0; n < num_params; n++)
		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
			tee_shm_put(params[n].u.memref.shm);

	/*
	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (n = 0; n < num_params; n++)
		if (params[n].attr &&
		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;

	/* At most we'll need one meta parameter so no need to check for more */
	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
		*num_meta = 1;
	else
		*num_meta = 0;

	return 0;
}

/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		of used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where the supplicant will be hanging most of
		 * the time, let's make this interruptible so we
		 * can easily restart the supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can
		 * be processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}

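/*
 * Look up the request that the supplicant is answering: with meta
 * parameters the request id comes from the first (meta) value parameter,
 * otherwise the synchronous supp->req_id is used. The request is removed
 * from the idr and *num_meta tells the caller how many meta parameters
 * to skip. Called with supp->mutex held.
 */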
static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	struct optee_supp_req *req;
	int id;
	size_t nm;
	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			 TEE_IOCTL_PARAM_ATTR_META;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id == -1) {
		if (param->attr != attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	} else {
		id = supp->req_id;
		nm = 0;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}

/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t n;
	size_t num_meta;

	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let supplicant restart. */
		return PTR_ERR(req);
	}

	/* Update out and in/out parameters */
	for (n = 0; n < req->num_params; n++) {
		struct tee_param *p = req->param + n;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n + num_meta].u.value.a;
			p->u.value.b = param[n + num_meta].u.value.b;
			p->u.value.c = param[n + num_meta].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = param[n + num_meta].u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue */
	complete(&req->c);

	return 0;
}