1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * This file contains the core framework routines for the
28  * kernel cryptographic framework. These routines form the middle
29  * layer between the kernel API/ioctls and the SPI.
30 */
31
32 #include <sys/zfs_context.h>
33 #include <sys/crypto/common.h>
34 #include <sys/crypto/impl.h>
35 #include <sys/crypto/sched_impl.h>
36 #include <sys/crypto/api.h>
37
38 kcf_global_swq_t *gswq; /* Global software queue */
39
40 /* Thread pool related variables */
41 static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */
42 int kcf_maxthreads = 2;
43 int kcf_minthreads = 1;
44 int kcf_thr_multiple = 2; /* Boot-time tunable for experimentation */
45 static ulong_t kcf_idlethr_timeout;
46 #define KCF_DEFAULT_THRTIMEOUT 60000000 /* 60 seconds */
47
48 /* kmem caches used by the scheduler */
49 static kmem_cache_t *kcf_sreq_cache;
50 static kmem_cache_t *kcf_areq_cache;
51 static kmem_cache_t *kcf_context_cache;
52
53 /* Global request ID table */
54 static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
55
56 /* KCF stats. Not protected. */
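/* Entry order below follows kcf_stats_t; values are filled in by kcf_misc_kstat_update(). */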
57 static kcf_stats_t kcf_ksdata = {
58 { "total threads in pool", KSTAT_DATA_UINT32},
59 { "idle threads in pool", KSTAT_DATA_UINT32},
60 { "min threads in pool", KSTAT_DATA_UINT32},
61 { "max threads in pool", KSTAT_DATA_UINT32},
62 { "requests in gswq", KSTAT_DATA_UINT32},
63 { "max requests in gswq", KSTAT_DATA_UINT32},
64 { "threads for HW taskq", KSTAT_DATA_UINT32},
65 { "minalloc for HW taskq", KSTAT_DATA_UINT32},
66 { "maxalloc for HW taskq", KSTAT_DATA_UINT32}
67 };
68
69 static kstat_t *kcf_misc_kstat = NULL;
70 ulong_t kcf_swprov_hndl = 0;
71
72 static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
73 kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
74 static int kcf_disp_sw_request(kcf_areq_node_t *);
75 static void process_req_hwp(void *);
76 static int kcf_enqueue(kcf_areq_node_t *);
77 static void kcfpool_alloc(void);
78 static void kcf_reqid_delete(kcf_areq_node_t *areq);
79 static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
80 static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
81
82 /*
83 * Create a new context.
84 */
85 crypto_ctx_t *
86 kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
87 crypto_session_id_t sid)
88 {
89 crypto_ctx_t *ctx;
90 kcf_context_t *kcf_ctx;
91
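/*
 * Synchronous callers (crq == NULL) may block for memory; asynchronous
 * callers must not sleep, so their allocation is allowed to fail instead.
 */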
92 kcf_ctx = kmem_cache_alloc(kcf_context_cache,
93 (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
94 if (kcf_ctx == NULL)
95 return (NULL);
96
97 /* initialize the context for the consumer */
98 kcf_ctx->kc_refcnt = 1;
99 kcf_ctx->kc_req_chain_first = NULL;
100 kcf_ctx->kc_req_chain_last = NULL;
101 kcf_ctx->kc_secondctx = NULL;
102 KCF_PROV_REFHOLD(pd);
103 kcf_ctx->kc_prov_desc = pd;
104 kcf_ctx->kc_sw_prov_desc = NULL;
105 kcf_ctx->kc_mech = NULL;
106
107 ctx = &kcf_ctx->kc_glbl_ctx;
108 ctx->cc_provider = pd->pd_prov_handle;
109 ctx->cc_session = sid;
110 ctx->cc_provider_private = NULL;
111 ctx->cc_framework_private = (void *)kcf_ctx;
112 ctx->cc_flags = 0;
113 ctx->cc_opstate = NULL;
114
115 return (ctx);
116 }
117
118 /*
119 * Allocate a new async request node.
120 *
121 * ictx - Framework private context pointer
122  * crq - Has callback function and argument. Should be non-NULL.
123 * req - The parameters to pass to the SPI
124 */
125 static kcf_areq_node_t *
126 kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
127 crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
128 {
129 kcf_areq_node_t *arptr, *areq;
130
131 ASSERT(crq != NULL);
132 arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
133 if (arptr == NULL)
134 return (NULL);
135
136 arptr->an_state = REQ_ALLOCATED;
137 arptr->an_reqarg = *crq;
138 arptr->an_params = *req;
139 arptr->an_context = ictx;
140 arptr->an_isdual = isdual;
141
142 arptr->an_next = arptr->an_prev = NULL;
143 KCF_PROV_REFHOLD(pd);
144 arptr->an_provider = pd;
145 arptr->an_tried_plist = NULL;
146 arptr->an_refcnt = 1;
147 arptr->an_idnext = arptr->an_idprev = NULL;
148
149 /*
150 * Requests for context-less operations do not use the
151  * an_is_my_turn and an_ctxchain_next fields.
152 */
153 if (ictx == NULL)
154 return (arptr);
155
156 KCF_CONTEXT_REFHOLD(ictx);
157 /*
158 * Chain this request to the context.
159 */
160 mutex_enter(&ictx->kc_in_use_lock);
161 arptr->an_ctxchain_next = NULL;
162 if ((areq = ictx->kc_req_chain_last) == NULL) {
163 arptr->an_is_my_turn = B_TRUE;
164 ictx->kc_req_chain_last =
165 ictx->kc_req_chain_first = arptr;
166 } else {
167 ASSERT(ictx->kc_req_chain_first != NULL);
168 arptr->an_is_my_turn = B_FALSE;
169 /* Insert the new request to the end of the chain. */
170 areq->an_ctxchain_next = arptr;
171 ictx->kc_req_chain_last = arptr;
172 }
173 mutex_exit(&ictx->kc_in_use_lock);
174
175 return (arptr);
176 }
177
178 /*
179 * Queue the request node and do one of the following:
180 * - If there is an idle thread signal it to run.
181 * - If there is no idle thread and max running threads is not
182 * reached, signal the creator thread for more threads.
183 *
184 * If the two conditions above are not met, we don't need to do
185  * anything. The request will be picked up by one of the
186  * worker threads when one becomes available.
187 */
188 static int
189 kcf_disp_sw_request(kcf_areq_node_t *areq)
190 {
191 int err;
192 int cnt = 0;
193
194 if ((err = kcf_enqueue(areq)) != 0)
195 return (err);
196
197 if (kcfpool->kp_idlethreads > 0) {
198 /* Signal an idle thread to run */
199 mutex_enter(&gswq->gs_lock);
200 cv_signal(&gswq->gs_cv);
201 mutex_exit(&gswq->gs_lock);
202
203 return (CRYPTO_QUEUED);
204 }
205
206 /*
207  * We try to keep the number of running threads at
208  * kcf_minthreads to reduce gs_lock contention.
209 */
210 cnt = kcf_minthreads -
211 (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
212 if (cnt > 0) {
213 /*
214 * The following ensures the number of threads in pool
215 * does not exceed kcf_maxthreads.
216 */
217 cnt = MIN(cnt, kcf_maxthreads - (int)kcfpool->kp_threads);
218 if (cnt > 0) {
219 /* Signal the creator thread for more threads */
220 mutex_enter(&kcfpool->kp_user_lock);
221 if (!kcfpool->kp_signal_create_thread) {
222 kcfpool->kp_signal_create_thread = B_TRUE;
223 kcfpool->kp_nthrs = cnt;
224 cv_signal(&kcfpool->kp_user_cv);
225 }
226 mutex_exit(&kcfpool->kp_user_lock);
227 }
228 }
229
230 return (CRYPTO_QUEUED);
231 }
232
233 /*
234 * This routine is called by the taskq associated with
235 * each hardware provider. We notify the kernel consumer
236 * via the callback routine in case of CRYPTO_SUCCESS or
237 * a failure.
238 *
239 * A request can be of type kcf_areq_node_t or of type
240 * kcf_sreq_node_t.
241 */
242 static void
243 process_req_hwp(void *ireq)
244 {
245 int error = 0;
246 crypto_ctx_t *ctx;
247 kcf_call_type_t ctype;
248 kcf_provider_desc_t *pd;
249 kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
250 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
251
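/*
 * The request type (sync vs. async) selects which of the two views of
 * ireq is valid, and hence where the provider descriptor lives.
 */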
252 pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
253 sreq->sn_provider : areq->an_provider;
254
255 /*
256 * Wait if flow control is in effect for the provider. A
257 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
258 * notification will signal us. We also get signaled if
259 * the provider is unregistering.
260 */
261 if (pd->pd_state == KCF_PROV_BUSY) {
262 mutex_enter(&pd->pd_lock);
263 while (pd->pd_state == KCF_PROV_BUSY)
264 cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
265 mutex_exit(&pd->pd_lock);
266 }
267
268 /*
269 * Bump the internal reference count while the request is being
270 * processed. This is how we know when it's safe to unregister
271 * a provider. This step must precede the pd_state check below.
272 */
273 KCF_PROV_IREFHOLD(pd);
274
275 /*
276 * Fail the request if the provider has failed. We return a
277 * recoverable error and the notified clients attempt any
278 * recovery. For async clients this is done in kcf_aop_done()
279 * and for sync clients it is done in the k-api routines.
280 */
281 if (pd->pd_state >= KCF_PROV_FAILED) {
282 error = CRYPTO_DEVICE_ERROR;
283 goto bail;
284 }
285
286 if (ctype == CRYPTO_SYNCH) {
287 mutex_enter(&sreq->sn_lock);
288 sreq->sn_state = REQ_INPROGRESS;
289 mutex_exit(&sreq->sn_lock);
290
291 ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
292 error = common_submit_request(sreq->sn_provider, ctx,
293 sreq->sn_params, sreq);
294 } else {
295 kcf_context_t *ictx;
296 ASSERT(ctype == CRYPTO_ASYNCH);
297
298 /*
299 * We are in the per-hardware provider thread context and
300 * hence can sleep. Note that the caller would have done
301 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
302 */
303 ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
304
305 mutex_enter(&areq->an_lock);
306 /*
307 * We need to maintain ordering for multi-part requests.
308 * an_is_my_turn is set to B_TRUE initially for a request
309 * when it is enqueued and there are no other requests
310 * for that context. It is set later from kcf_aop_done() when
311 * the request before us in the chain of requests for the
312 * context completes. We get signaled at that point.
313 */
314 if (ictx != NULL) {
315 ASSERT(ictx->kc_prov_desc == areq->an_provider);
316
317 while (areq->an_is_my_turn == B_FALSE) {
318 cv_wait(&areq->an_turn_cv, &areq->an_lock);
319 }
320 }
321 areq->an_state = REQ_INPROGRESS;
322 mutex_exit(&areq->an_lock);
323
324 error = common_submit_request(areq->an_provider, ctx,
325 &areq->an_params, areq);
326 }
327
328 bail:
329 if (error == CRYPTO_QUEUED) {
330 /*
331 * The request is queued by the provider and we should
332 * get a crypto_op_notification() from the provider later.
333 * We notify the consumer at that time.
334 */
335 return;
336 } else { /* CRYPTO_SUCCESS or other failure */
337 KCF_PROV_IREFRELE(pd);
338 if (ctype == CRYPTO_SYNCH)
339 kcf_sop_done(sreq, error);
340 else
341 kcf_aop_done(areq, error);
342 }
343 }
344
345 /*
346 * This routine checks if a request can be retried on another
347 * provider. If true, mech1 is initialized to point to the mechanism
348 * structure. mech2 is also initialized in case of a dual operation. fg
349 * is initialized to the correct crypto_func_group_t bit flag. They are
350 * initialized by this routine, so that the caller can pass them to a
351 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
352 *
353  * We check that the request is for an init or atomic routine and that
354  * it is for one of the operation groups used from the k-api.
355 */
356 static boolean_t
357 can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
358 crypto_mechanism_t **mech2, crypto_func_group_t *fg)
359 {
360 kcf_req_params_t *params;
361 kcf_op_type_t optype;
362
363 params = &areq->an_params;
364 optype = params->rp_optype;
365
366 if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
367 return (B_FALSE);
368
369 switch (params->rp_opgrp) {
370 case KCF_OG_DIGEST: {
371 kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
372
373 dops->do_mech.cm_type = dops->do_framework_mechtype;
374 *mech1 = &dops->do_mech;
375 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
376 CRYPTO_FG_DIGEST_ATOMIC;
377 break;
378 }
379
380 case KCF_OG_MAC: {
381 kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
382
383 mops->mo_mech.cm_type = mops->mo_framework_mechtype;
384 *mech1 = &mops->mo_mech;
385 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
386 CRYPTO_FG_MAC_ATOMIC;
387 break;
388 }
389
390 case KCF_OG_SIGN: {
391 kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
392
393 sops->so_mech.cm_type = sops->so_framework_mechtype;
394 *mech1 = &sops->so_mech;
395 switch (optype) {
396 case KCF_OP_INIT:
397 *fg = CRYPTO_FG_SIGN;
398 break;
399 case KCF_OP_ATOMIC:
400 *fg = CRYPTO_FG_SIGN_ATOMIC;
401 break;
402 default:
403 ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
404 *fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
405 }
406 break;
407 }
408
409 case KCF_OG_VERIFY: {
410 kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
411
412 vops->vo_mech.cm_type = vops->vo_framework_mechtype;
413 *mech1 = &vops->vo_mech;
414 switch (optype) {
415 case KCF_OP_INIT:
416 *fg = CRYPTO_FG_VERIFY;
417 break;
418 case KCF_OP_ATOMIC:
419 *fg = CRYPTO_FG_VERIFY_ATOMIC;
420 break;
421 default:
422 ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
423 *fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
424 }
425 break;
426 }
427
428 case KCF_OG_ENCRYPT: {
429 kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
430
431 eops->eo_mech.cm_type = eops->eo_framework_mechtype;
432 *mech1 = &eops->eo_mech;
433 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
434 CRYPTO_FG_ENCRYPT_ATOMIC;
435 break;
436 }
437
438 case KCF_OG_DECRYPT: {
439 kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
440
441 dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
442 *mech1 = &dcrops->dop_mech;
443 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
444 CRYPTO_FG_DECRYPT_ATOMIC;
445 break;
446 }
447
448 case KCF_OG_ENCRYPT_MAC: {
449 kcf_encrypt_mac_ops_params_t *eops =
450 &params->rp_u.encrypt_mac_params;
451
452 eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
453 *mech1 = &eops->em_encr_mech;
454 eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
455 *mech2 = &eops->em_mac_mech;
456 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
457 CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
458 break;
459 }
460
461 case KCF_OG_MAC_DECRYPT: {
462 kcf_mac_decrypt_ops_params_t *dops =
463 &params->rp_u.mac_decrypt_params;
464
465 dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
466 *mech1 = &dops->md_mac_mech;
467 dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
468 *mech2 = &dops->md_decr_mech;
469 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
470 CRYPTO_FG_MAC_DECRYPT_ATOMIC;
471 break;
472 }
473
474 default:
475 return (B_FALSE);
476 }
477
478 return (B_TRUE);
479 }
480
481 /*
482 * This routine is called when a request to a provider has failed
483 * with a recoverable error. This routine tries to find another provider
484 * and dispatches the request to the new provider, if one is available.
485 * We reuse the request structure.
486 *
487 * A return value of NULL from kcf_get_mech_provider() indicates
488 * we have tried the last provider.
489 */
490 static int
491 kcf_resubmit_request(kcf_areq_node_t *areq)
492 {
493 int error = CRYPTO_FAILED;
494 kcf_context_t *ictx;
495 kcf_provider_desc_t *old_pd;
496 kcf_provider_desc_t *new_pd;
497 crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
498 crypto_mech_type_t prov_mt1, prov_mt2;
499 crypto_func_group_t fg;
500
501 if (!can_resubmit(areq, &mech1, &mech2, &fg))
502 return (error);
503
504 old_pd = areq->an_provider;
505 /*
506 * Add old_pd to the list of providers already tried. We release
507 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
508 * kcf_free_triedlist().
509 */
510 if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
511 KM_NOSLEEP) == NULL)
512 return (error);
513
514 if (mech1 && !mech2) {
515 new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
516 areq->an_tried_plist, fg,
517 (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
518 } else {
519 ASSERT(mech1 != NULL && mech2 != NULL);
520
521 new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
522 &prov_mt2, &error, areq->an_tried_plist, fg, fg,
523 (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
524 }
525
526 if (new_pd == NULL)
527 return (error);
528
529 /*
530 * We reuse the old context by resetting provider specific
531 * fields in it.
532 */
533 if ((ictx = areq->an_context) != NULL) {
534 crypto_ctx_t *ctx;
535
536 ASSERT(old_pd == ictx->kc_prov_desc);
537 KCF_PROV_REFRELE(ictx->kc_prov_desc);
538 KCF_PROV_REFHOLD(new_pd);
539 ictx->kc_prov_desc = new_pd;
540
541 ctx = &ictx->kc_glbl_ctx;
542 ctx->cc_provider = new_pd->pd_prov_handle;
543 ctx->cc_session = new_pd->pd_sid;
544 ctx->cc_provider_private = NULL;
545 }
546
547 	/* We reuse areq by resetting the provider and context fields. */
548 KCF_PROV_REFRELE(old_pd);
549 KCF_PROV_REFHOLD(new_pd);
550 areq->an_provider = new_pd;
551 mutex_enter(&areq->an_lock);
552 areq->an_state = REQ_WAITING;
553 mutex_exit(&areq->an_lock);
554
555 switch (new_pd->pd_prov_type) {
556 case CRYPTO_SW_PROVIDER:
557 error = kcf_disp_sw_request(areq);
558 break;
559
560 case CRYPTO_HW_PROVIDER: {
561 taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
562
563 if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
564 (taskqid_t)0) {
565 error = CRYPTO_HOST_MEMORY;
566 } else {
567 error = CRYPTO_QUEUED;
568 }
569
570 break;
571 	}
572 	default:
573 		break;
574 	}
575
576 return (error);
577 }
578
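/*
 * Return nonzero when the taskq has no outstanding work. The check differs
 * between the kernel build and the userspace build of this code.
 */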
579 static inline int EMPTY_TASKQ(taskq_t *tq)
580 {
581 #ifdef _KERNEL
582 return (tq->tq_lowest_id == tq->tq_next_id);
583 #else
584 return (tq->tq_task.tqent_next == &tq->tq_task || tq->tq_active == 0);
585 #endif
586 }
587
588 /*
589 * Routine called by both ioctl and k-api. The consumer should
590 * bundle the parameters into a kcf_req_params_t structure. A bunch
591 * of macros are available in ops_impl.h for this bundling. They are:
592 *
593 * KCF_WRAP_DIGEST_OPS_PARAMS()
594 * KCF_WRAP_MAC_OPS_PARAMS()
595 * KCF_WRAP_ENCRYPT_OPS_PARAMS()
596 * KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
597 *
598 * It is the caller's responsibility to free the ctx argument when
599 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
600 */
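/*
 * Illustrative sketch (not part of this file) of how a k-api routine might
 * bundle its arguments and submit them; the exact wrapper macro arguments
 * are defined in ops_impl.h and may differ from what is shown here:
 *
 *	kcf_req_params_t params;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
 *	    mech, NULL, data, digest);
 *
 *	// crq == NULL requests synchronous completion; a non-NULL crq
 *	// supplies the asynchronous callback and its flags.
 *	error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
 */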
601 int
602 kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
603 crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
604 {
605 int error = CRYPTO_SUCCESS;
606 kcf_areq_node_t *areq;
607 kcf_sreq_node_t *sreq;
608 kcf_context_t *kcf_ctx;
609 taskq_t *taskq = pd->pd_sched_info.ks_taskq;
610
611 kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
612
613 /* Synchronous cases */
614 if (crq == NULL) {
615 switch (pd->pd_prov_type) {
616 case CRYPTO_SW_PROVIDER:
617 error = common_submit_request(pd, ctx, params,
618 KCF_RHNDL(KM_SLEEP));
619 break;
620
621 case CRYPTO_HW_PROVIDER:
622 /*
623 * Special case for CRYPTO_SYNCHRONOUS providers that
624 * never return a CRYPTO_QUEUED error. We skip any
625 * request allocation and call the SPI directly.
626 */
627 if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
628 EMPTY_TASKQ(taskq)) {
629 KCF_PROV_IREFHOLD(pd);
630 if (pd->pd_state == KCF_PROV_READY) {
631 error = common_submit_request(pd, ctx,
632 params, KCF_RHNDL(KM_SLEEP));
633 KCF_PROV_IREFRELE(pd);
634 ASSERT(error != CRYPTO_QUEUED);
635 break;
636 }
637 KCF_PROV_IREFRELE(pd);
638 }
639
640 sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
641 sreq->sn_state = REQ_ALLOCATED;
642 sreq->sn_rv = CRYPTO_FAILED;
643 sreq->sn_params = params;
644
645 /*
646 * Note that we do not need to hold the context
647 			 * for the synchronous case as the context will never
648 * become invalid underneath us. We do not need to hold
649 * the provider here either as the caller has a hold.
650 */
651 sreq->sn_context = kcf_ctx;
652 ASSERT(KCF_PROV_REFHELD(pd));
653 sreq->sn_provider = pd;
654
655 ASSERT(taskq != NULL);
656 /*
657 * Call the SPI directly if the taskq is empty and the
658 * provider is not busy, else dispatch to the taskq.
659 * Calling directly is fine as this is the synchronous
660 * case. This is unlike the asynchronous case where we
661 * must always dispatch to the taskq.
662 */
663 if (EMPTY_TASKQ(taskq) &&
664 pd->pd_state == KCF_PROV_READY) {
665 process_req_hwp(sreq);
666 } else {
667 /*
668 * We can not tell from taskq_dispatch() return
669 * value if we exceeded maxalloc. Hence the
670 * check here. Since we are allowed to wait in
671 * the synchronous case, we wait for the taskq
672 * to become empty.
673 */
674 if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
675 taskq_wait(taskq);
676 }
677
678 (void) taskq_dispatch(taskq, process_req_hwp,
679 sreq, TQ_SLEEP);
680 }
681
682 /*
683 * Wait for the notification to arrive,
684 * if the operation is not done yet.
685 * Bug# 4722589 will make the wait a cv_wait_sig().
686 */
687 mutex_enter(&sreq->sn_lock);
688 while (sreq->sn_state < REQ_DONE)
689 cv_wait(&sreq->sn_cv, &sreq->sn_lock);
690 mutex_exit(&sreq->sn_lock);
691
692 error = sreq->sn_rv;
693 kmem_cache_free(kcf_sreq_cache, sreq);
694
695 break;
696
697 default:
698 error = CRYPTO_FAILED;
699 break;
700 }
701
702 } else { /* Asynchronous cases */
703 switch (pd->pd_prov_type) {
704 case CRYPTO_SW_PROVIDER:
705 if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
706 /*
707 * This case has less overhead since there is
708 * no switching of context.
709 */
710 error = common_submit_request(pd, ctx, params,
711 KCF_RHNDL(KM_NOSLEEP));
712 } else {
713 /*
714 * CRYPTO_ALWAYS_QUEUE is set. We need to
715 * queue the request and return.
716 */
717 areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
718 params, cont);
719 if (areq == NULL)
720 error = CRYPTO_HOST_MEMORY;
721 else {
722 if (!(crq->cr_flag
723 & CRYPTO_SKIP_REQID)) {
724 /*
725 * Set the request handle. This handle
726 * is used for any crypto_cancel_req(9f)
727 * calls from the consumer. We have to
728 * do this before dispatching the
729 * request.
730 */
731 crq->cr_reqid = kcf_reqid_insert(areq);
732 }
733
734 error = kcf_disp_sw_request(areq);
735 /*
736 					 * If there was an error processing this
737 					 * request, remove the handle and
738 					 * release the request structure.
739 */
740 if (error != CRYPTO_QUEUED) {
741 if (!(crq->cr_flag
742 & CRYPTO_SKIP_REQID))
743 kcf_reqid_delete(areq);
744 KCF_AREQ_REFRELE(areq);
745 }
746 }
747 }
748 break;
749
750 case CRYPTO_HW_PROVIDER:
751 /*
752 * We need to queue the request and return.
753 */
754 areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
755 cont);
756 if (areq == NULL) {
757 error = CRYPTO_HOST_MEMORY;
758 goto done;
759 }
760
761 ASSERT(taskq != NULL);
762 /*
763 * We can not tell from taskq_dispatch() return
764 * value if we exceeded maxalloc. Hence the check
765 * here.
766 */
767 if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
768 error = CRYPTO_BUSY;
769 KCF_AREQ_REFRELE(areq);
770 goto done;
771 }
772
773 if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
774 /*
775 * Set the request handle. This handle is used
776 * for any crypto_cancel_req(9f) calls from the
777 * consumer. We have to do this before dispatching
778 * the request.
779 */
780 crq->cr_reqid = kcf_reqid_insert(areq);
781 }
782
783 if (taskq_dispatch(taskq,
784 process_req_hwp, areq, TQ_NOSLEEP) ==
785 (taskqid_t)0) {
786 error = CRYPTO_HOST_MEMORY;
787 if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
788 kcf_reqid_delete(areq);
789 KCF_AREQ_REFRELE(areq);
790 } else {
791 error = CRYPTO_QUEUED;
792 }
793 break;
794
795 default:
796 error = CRYPTO_FAILED;
797 break;
798 }
799 }
800
801 done:
802 return (error);
803 }
804
805 /*
806 * We're done with this framework context, so free it. Note that freeing
807 * framework context (kcf_context) frees the global context (crypto_ctx).
808 *
809 * The provider is responsible for freeing provider private context after a
810 * final or single operation and resetting the cc_provider_private field
811 * to NULL. It should do this before it notifies the framework of the
812 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
813 * like crypto_cancel_ctx(9f).
814 */
815 void
816 kcf_free_context(kcf_context_t *kcf_ctx)
817 {
818 kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
819 crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
820 kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
821
822 /* Release the second context, if any */
823
824 if (kcf_secondctx != NULL)
825 KCF_CONTEXT_REFRELE(kcf_secondctx);
826
827 if (gctx->cc_provider_private != NULL) {
828 mutex_enter(&pd->pd_lock);
829 if (!KCF_IS_PROV_REMOVED(pd)) {
830 /*
831 * Increment the provider's internal refcnt so it
832 * doesn't unregister from the framework while
833 * we're calling the entry point.
834 */
835 KCF_PROV_IREFHOLD(pd);
836 mutex_exit(&pd->pd_lock);
837 (void) KCF_PROV_FREE_CONTEXT(pd, gctx);
838 KCF_PROV_IREFRELE(pd);
839 } else {
840 mutex_exit(&pd->pd_lock);
841 }
842 }
843
844 /* kcf_ctx->kc_prov_desc has a hold on pd */
845 KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
846
847 /* check if this context is shared with a software provider */
848 if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
849 kcf_ctx->kc_sw_prov_desc != NULL) {
850 KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
851 }
852
853 kmem_cache_free(kcf_context_cache, kcf_ctx);
854 }
855
856 /*
857 * Free the request after releasing all the holds.
858 */
859 void
860 kcf_free_req(kcf_areq_node_t *areq)
861 {
862 KCF_PROV_REFRELE(areq->an_provider);
863 if (areq->an_context != NULL)
864 KCF_CONTEXT_REFRELE(areq->an_context);
865
866 if (areq->an_tried_plist != NULL)
867 kcf_free_triedlist(areq->an_tried_plist);
868 kmem_cache_free(kcf_areq_cache, areq);
869 }
870
871 /*
872 * Utility routine to remove a request from the chain of requests
873 * hanging off a context.
874 */
875 void
876 kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
877 {
878 kcf_areq_node_t *cur, *prev;
879
880 /*
881 * Get context lock, search for areq in the chain and remove it.
882 */
883 ASSERT(ictx != NULL);
884 mutex_enter(&ictx->kc_in_use_lock);
885 prev = cur = ictx->kc_req_chain_first;
886
887 while (cur != NULL) {
888 if (cur == areq) {
889 if (prev == cur) {
890 if ((ictx->kc_req_chain_first =
891 cur->an_ctxchain_next) == NULL)
892 ictx->kc_req_chain_last = NULL;
893 } else {
894 if (cur == ictx->kc_req_chain_last)
895 ictx->kc_req_chain_last = prev;
896 prev->an_ctxchain_next = cur->an_ctxchain_next;
897 }
898
899 break;
900 }
901 prev = cur;
902 cur = cur->an_ctxchain_next;
903 }
904 mutex_exit(&ictx->kc_in_use_lock);
905 }
906
907 /*
908 * Remove the specified node from the global software queue.
909 *
910 * The caller must hold the queue lock and request lock (an_lock).
911 */
912 void
913 kcf_remove_node(kcf_areq_node_t *node)
914 {
915 kcf_areq_node_t *nextp = node->an_next;
916 kcf_areq_node_t *prevp = node->an_prev;
917
918 if (nextp != NULL)
919 nextp->an_prev = prevp;
920 else
921 gswq->gs_last = prevp;
922
923 if (prevp != NULL)
924 prevp->an_next = nextp;
925 else
926 gswq->gs_first = nextp;
927
928 node->an_state = REQ_CANCELED;
929 }
930
931 /*
932 * Add the request node to the end of the global software queue.
933 *
934 * The caller should not hold the queue lock. Returns 0 if the
935 * request is successfully queued. Returns CRYPTO_BUSY if the limit
936 * on the number of jobs is exceeded.
937 */
938 static int
939 kcf_enqueue(kcf_areq_node_t *node)
940 {
941 kcf_areq_node_t *tnode;
942
943 mutex_enter(&gswq->gs_lock);
944
945 if (gswq->gs_njobs >= gswq->gs_maxjobs) {
946 mutex_exit(&gswq->gs_lock);
947 return (CRYPTO_BUSY);
948 }
949
950 if (gswq->gs_last == NULL) {
951 gswq->gs_first = gswq->gs_last = node;
952 } else {
953 ASSERT(gswq->gs_last->an_next == NULL);
954 tnode = gswq->gs_last;
955 tnode->an_next = node;
956 gswq->gs_last = node;
957 node->an_prev = tnode;
958 }
959
960 gswq->gs_njobs++;
961
962 /* an_lock not needed here as we hold gs_lock */
963 node->an_state = REQ_WAITING;
964
965 mutex_exit(&gswq->gs_lock);
966
967 return (0);
968 }
969
970 /*
971 * kmem_cache_alloc constructor for sync request structure.
972 */
973 /* ARGSUSED */
974 static int
975 kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
976 {
977 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
978
979 sreq->sn_type = CRYPTO_SYNCH;
980 cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
981 mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);
982
983 return (0);
984 }
985
986 /* ARGSUSED */
987 static void
988 kcf_sreq_cache_destructor(void *buf, void *cdrarg)
989 {
990 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
991
992 mutex_destroy(&sreq->sn_lock);
993 cv_destroy(&sreq->sn_cv);
994 }
995
996 /*
997 * kmem_cache_alloc constructor for async request structure.
998 */
999 /* ARGSUSED */
1000 static int
1001 kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1002 {
1003 kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1004
1005 areq->an_type = CRYPTO_ASYNCH;
1006 areq->an_refcnt = 0;
1007 mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
1008 cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
1009 cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);
1010
1011 return (0);
1012 }
1013
1014 /* ARGSUSED */
1015 static void
1016 kcf_areq_cache_destructor(void *buf, void *cdrarg)
1017 {
1018 kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1019
1020 ASSERT(areq->an_refcnt == 0);
1021 mutex_destroy(&areq->an_lock);
1022 cv_destroy(&areq->an_done);
1023 cv_destroy(&areq->an_turn_cv);
1024 }
1025
1026 /*
1027 * kmem_cache_alloc constructor for kcf_context structure.
1028 */
1029 /* ARGSUSED */
1030 static int
1031 kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
1032 {
1033 kcf_context_t *kctx = (kcf_context_t *)buf;
1034
1035 kctx->kc_refcnt = 0;
1036 mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
1037
1038 return (0);
1039 }
1040
1041 /* ARGSUSED */
1042 static void
1043 kcf_context_cache_destructor(void *buf, void *cdrarg)
1044 {
1045 kcf_context_t *kctx = (kcf_context_t *)buf;
1046
1047 ASSERT(kctx->kc_refcnt == 0);
1048 mutex_destroy(&kctx->kc_in_use_lock);
1049 }
1050
1051 void
1052 kcf_sched_destroy(void)
1053 {
1054 int i;
1055
1056 if (kcf_misc_kstat)
1057 kstat_delete(kcf_misc_kstat);
1058
1059 if (kcfpool)
1060 kmem_free(kcfpool, sizeof (kcf_pool_t));
1061
1062 for (i = 0; i < REQID_TABLES; i++) {
1063 if (kcf_reqid_table[i])
1064 kmem_free(kcf_reqid_table[i],
1065 sizeof (kcf_reqid_table_t));
1066 }
1067
1068 if (gswq)
1069 kmem_free(gswq, sizeof (kcf_global_swq_t));
1070
1071 if (kcf_context_cache)
1072 kmem_cache_destroy(kcf_context_cache);
1073 if (kcf_areq_cache)
1074 kmem_cache_destroy(kcf_areq_cache);
1075 if (kcf_sreq_cache)
1076 kmem_cache_destroy(kcf_sreq_cache);
1077 }
1078
1079 /*
1080 * Creates and initializes all the structures needed by the framework.
1081 */
1082 void
1083 kcf_sched_init(void)
1084 {
1085 int i;
1086 kcf_reqid_table_t *rt;
1087
1088 /*
1089 * Create all the kmem caches needed by the framework. We set the
1090 	 * align argument to 64 to get slabs aligned on a 64-byte boundary and
1091 	 * objects (cache_chunksize) that are a multiple of 64 bytes.
1092 	 * This helps avoid false sharing, since this is the size of the
1093 	 * CPU cache line.
1094 */
1095 kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
1096 sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
1097 kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);
1098
1099 kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
1100 sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
1101 kcf_areq_cache_destructor, NULL, NULL, NULL, 0);
1102
1103 kcf_context_cache = kmem_cache_create("kcf_context_cache",
1104 sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
1105 kcf_context_cache_destructor, NULL, NULL, NULL, 0);
1106
1107 gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);
1108
1109 mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
1110 cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
1111 gswq->gs_njobs = 0;
1112 gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1113 gswq->gs_first = gswq->gs_last = NULL;
1114
1115 /* Initialize the global reqid table */
1116 for (i = 0; i < REQID_TABLES; i++) {
1117 rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
1118 kcf_reqid_table[i] = rt;
1119 mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
1120 rt->rt_curid = i;
1121 }
1122
1123 /* Allocate and initialize the thread pool */
1124 kcfpool_alloc();
1125
1126 /* Initialize the event notification list variables */
1127 mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
1128 cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);
1129
1130 /* Create the kcf kstat */
1131 kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
1132 KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
1133 KSTAT_FLAG_VIRTUAL);
1134
1135 if (kcf_misc_kstat != NULL) {
1136 kcf_misc_kstat->ks_data = &kcf_ksdata;
1137 kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
1138 kstat_install(kcf_misc_kstat);
1139 }
1140 }
1141
1142 /*
1143 * Signal the waiting sync client.
1144 */
1145 void
1146 kcf_sop_done(kcf_sreq_node_t *sreq, int error)
1147 {
1148 mutex_enter(&sreq->sn_lock);
1149 sreq->sn_state = REQ_DONE;
1150 sreq->sn_rv = error;
1151 cv_signal(&sreq->sn_cv);
1152 mutex_exit(&sreq->sn_lock);
1153 }
1154
1155 /*
1156 * Callback the async client with the operation status.
1157 * We free the async request node and possibly the context.
1158 * We also handle any chain of requests hanging off of
1159 * the context.
1160 */
1161 void
1162 kcf_aop_done(kcf_areq_node_t *areq, int error)
1163 {
1164 kcf_op_type_t optype;
1165 boolean_t skip_notify = B_FALSE;
1166 kcf_context_t *ictx;
1167 kcf_areq_node_t *nextreq;
1168
1169 /*
1170 * Handle recoverable errors. This has to be done first
1171 	 * before doing anything else in this routine so that
1172 * we do not change the state of the request.
1173 */
1174 if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
1175 /*
1176 * We try another provider, if one is available. Else
1177 * we continue with the failure notification to the
1178 * client.
1179 */
1180 if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
1181 return;
1182 }
1183
1184 mutex_enter(&areq->an_lock);
1185 areq->an_state = REQ_DONE;
1186 mutex_exit(&areq->an_lock);
1187
1188 optype = (&areq->an_params)->rp_optype;
1189 if ((ictx = areq->an_context) != NULL) {
1190 /*
1191 		 * After a request is removed from the request
1192 		 * queue, it still stays on a chain of requests hanging
1193 		 * off its context structure. It needs to be removed
1194 		 * from this chain at this point.
1195 */
1196 mutex_enter(&ictx->kc_in_use_lock);
1197 nextreq = areq->an_ctxchain_next;
1198 if (nextreq != NULL) {
1199 mutex_enter(&nextreq->an_lock);
1200 nextreq->an_is_my_turn = B_TRUE;
1201 cv_signal(&nextreq->an_turn_cv);
1202 mutex_exit(&nextreq->an_lock);
1203 }
1204
1205 ictx->kc_req_chain_first = nextreq;
1206 if (nextreq == NULL)
1207 ictx->kc_req_chain_last = NULL;
1208 mutex_exit(&ictx->kc_in_use_lock);
1209
1210 if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
1211 ASSERT(nextreq == NULL);
1212 KCF_CONTEXT_REFRELE(ictx);
1213 } else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
1214 /*
1215 * NOTE - We do not release the context in case of update
1216 * operations. We require the consumer to free it explicitly,
1217 * in case it wants to abandon an update operation. This is done
1218 * as there may be mechanisms in ECB mode that can continue
1219 * even if an operation on a block fails.
1220 */
1221 KCF_CONTEXT_REFRELE(ictx);
1222 }
1223 }
1224
1225 /* Deal with the internal continuation to this request first */
1226
1227 if (areq->an_isdual) {
1228 kcf_dual_req_t *next_arg;
1229 next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
1230 next_arg->kr_areq = areq;
1231 KCF_AREQ_REFHOLD(areq);
1232 areq->an_isdual = B_FALSE;
1233
1234 NOTIFY_CLIENT(areq, error);
1235 return;
1236 }
1237
1238 /*
1239 * If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
1240 * always. If this flag is clear, we skip the notification
1241 * provided there are no errors. We check this flag for only
1242 * init or update operations. It is ignored for single, final or
1243 * atomic operations.
1244 */
1245 skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
1246 (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
1247 (error == CRYPTO_SUCCESS);
1248
1249 if (!skip_notify) {
1250 NOTIFY_CLIENT(areq, error);
1251 }
1252
1253 if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
1254 kcf_reqid_delete(areq);
1255
1256 KCF_AREQ_REFRELE(areq);
1257 }
1258
1259 /*
1260 * Allocate the thread pool and initialize all the fields.
1261 */
1262 static void
1263 kcfpool_alloc()
1264 {
1265 kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);
1266
1267 kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
1268 kcfpool->kp_blockedthreads = 0;
1269 kcfpool->kp_signal_create_thread = B_FALSE;
1270 kcfpool->kp_nthrs = 0;
1271 kcfpool->kp_user_waiting = B_FALSE;
1272
1273 mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
1274 cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);
1275
1276 mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
1277 cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
1278
1279 kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
1280 }
1281
1282 /*
1283 * Insert the async request in the hash table after assigning it
1284 * an ID. Returns the ID.
1285 *
1286 * The ID is used by the caller to pass as an argument to a
1287 * cancel_req() routine later.
1288 */
1289 static crypto_req_id_t
1290 kcf_reqid_insert(kcf_areq_node_t *areq)
1291 {
1292 int indx;
1293 crypto_req_id_t id;
1294 kcf_areq_node_t *headp;
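	/* Pick a reqid table based on the current CPU to spread rt_lock contention. */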
1295 kcf_reqid_table_t *rt =
1296 kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];
1297
1298 mutex_enter(&rt->rt_lock);
1299
1300 rt->rt_curid = id =
1301 (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
1302 SET_REQID(areq, id);
1303 indx = REQID_HASH(id);
1304 headp = areq->an_idnext = rt->rt_idhash[indx];
1305 areq->an_idprev = NULL;
1306 if (headp != NULL)
1307 headp->an_idprev = areq;
1308
1309 rt->rt_idhash[indx] = areq;
1310 mutex_exit(&rt->rt_lock);
1311
1312 return (id);
1313 }
1314
1315 /*
1316 * Delete the async request from the hash table.
1317 */
1318 static void
1319 kcf_reqid_delete(kcf_areq_node_t *areq)
1320 {
1321 int indx;
1322 kcf_areq_node_t *nextp, *prevp;
1323 crypto_req_id_t id = GET_REQID(areq);
1324 kcf_reqid_table_t *rt;
1325
1326 rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1327 indx = REQID_HASH(id);
1328
1329 mutex_enter(&rt->rt_lock);
1330
1331 nextp = areq->an_idnext;
1332 prevp = areq->an_idprev;
1333 if (nextp != NULL)
1334 nextp->an_idprev = prevp;
1335 if (prevp != NULL)
1336 prevp->an_idnext = nextp;
1337 else
1338 rt->rt_idhash[indx] = nextp;
1339
1340 SET_REQID(areq, 0);
1341 cv_broadcast(&areq->an_done);
1342
1343 mutex_exit(&rt->rt_lock);
1344 }
1345
1346 /*
1347 * Cancel a single asynchronous request.
1348 *
1349 * We guarantee that no problems will result from calling
1350 * crypto_cancel_req() for a request which is either running, or
1351 * has already completed. We remove the request from any queues
1352 * if it is possible. We wait for request completion if the
1353 * request is dispatched to a provider.
1354 *
1355 * Calling context:
1356 * Can be called from user context only.
1357 *
1358 * NOTE: We acquire the following locks in this routine (in order):
1359 * - rt_lock (kcf_reqid_table_t)
1360 * - gswq->gs_lock
1361 * - areq->an_lock
1362 * - ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
1363 *
1364  * This locking order MUST be maintained in code everywhere else.
1365 */
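/*
 * Illustrative consumer usage (assuming an asynchronous submission that
 * returned CRYPTO_QUEUED and did not set CRYPTO_SKIP_REQID); the k-api
 * call and its arguments are only sketched here:
 *
 *	crypto_call_req_t cr;
 *	...
 *	rv = crypto_mac(&mech, &data, &key, tmpl, &mac, &cr);
 *	if (rv == CRYPTO_QUEUED)
 *		crypto_cancel_req(cr.cr_reqid);
 */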
1366 void
1367 crypto_cancel_req(crypto_req_id_t id)
1368 {
1369 int indx;
1370 kcf_areq_node_t *areq;
1371 kcf_provider_desc_t *pd;
1372 kcf_context_t *ictx;
1373 kcf_reqid_table_t *rt;
1374
1375 rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1376 indx = REQID_HASH(id);
1377
1378 mutex_enter(&rt->rt_lock);
1379 for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
1380 if (GET_REQID(areq) == id) {
1381 /*
1382 * We found the request. It is either still waiting
1383 * in the framework queues or running at the provider.
1384 */
1385 pd = areq->an_provider;
1386 ASSERT(pd != NULL);
1387
1388 switch (pd->pd_prov_type) {
1389 case CRYPTO_SW_PROVIDER:
1390 mutex_enter(&gswq->gs_lock);
1391 mutex_enter(&areq->an_lock);
1392
1393 /* This request can be safely canceled. */
1394 if (areq->an_state <= REQ_WAITING) {
1395 /* Remove from gswq, global software queue. */
1396 kcf_remove_node(areq);
1397 if ((ictx = areq->an_context) != NULL)
1398 kcf_removereq_in_ctxchain(ictx, areq);
1399
1400 mutex_exit(&areq->an_lock);
1401 mutex_exit(&gswq->gs_lock);
1402 mutex_exit(&rt->rt_lock);
1403
1404 /* Remove areq from hash table and free it. */
1405 kcf_reqid_delete(areq);
1406 KCF_AREQ_REFRELE(areq);
1407 return;
1408 }
1409
1410 mutex_exit(&areq->an_lock);
1411 mutex_exit(&gswq->gs_lock);
1412 break;
1413
1414 case CRYPTO_HW_PROVIDER:
1415 /*
1416 * There is no interface to remove an entry
1417 * once it is on the taskq. So, we do not do
1418 				 * anything for a hardware provider.
1419 */
1420 break;
1421 default:
1422 break;
1423 }
1424
1425 /*
1426 * The request is running. Wait for the request completion
1427 * to notify us.
1428 */
1429 KCF_AREQ_REFHOLD(areq);
1430 while (GET_REQID(areq) == id)
1431 cv_wait(&areq->an_done, &rt->rt_lock);
1432 KCF_AREQ_REFRELE(areq);
1433 break;
1434 }
1435 }
1436
1437 mutex_exit(&rt->rt_lock);
1438 }
1439
1440 /*
1441 * Cancel all asynchronous requests associated with the
1442 * passed in crypto context and free it.
1443 *
1444 * A client SHOULD NOT call this routine after calling a crypto_*_final
1445 * routine. This routine is called only during intermediate operations.
1446 * The client should not use the crypto context after this function returns
1447 * since we destroy it.
1448 *
1449 * Calling context:
1450 * Can be called from user context only.
1451 */
1452 void
1453 crypto_cancel_ctx(crypto_context_t ctx)
1454 {
1455 kcf_context_t *ictx;
1456 kcf_areq_node_t *areq;
1457
1458 if (ctx == NULL)
1459 return;
1460
1461 ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
1462
1463 mutex_enter(&ictx->kc_in_use_lock);
1464
1465 /* Walk the chain and cancel each request */
1466 while ((areq = ictx->kc_req_chain_first) != NULL) {
1467 /*
1468 * We have to drop the lock here as we may have
1469 * to wait for request completion. We hold the
1470 * request before dropping the lock though, so that it
1471 * won't be freed underneath us.
1472 */
1473 KCF_AREQ_REFHOLD(areq);
1474 mutex_exit(&ictx->kc_in_use_lock);
1475
1476 crypto_cancel_req(GET_REQID(areq));
1477 KCF_AREQ_REFRELE(areq);
1478
1479 mutex_enter(&ictx->kc_in_use_lock);
1480 }
1481
1482 mutex_exit(&ictx->kc_in_use_lock);
1483 KCF_CONTEXT_REFRELE(ictx);
1484 }
1485
1486 /*
1487 * Update kstats.
1488 */
1489 static int
1490 kcf_misc_kstat_update(kstat_t *ksp, int rw)
1491 {
1492 uint_t tcnt;
1493 kcf_stats_t *ks_data;
1494
1495 if (rw == KSTAT_WRITE)
1496 return (EACCES);
1497
1498 ks_data = ksp->ks_data;
1499
1500 ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
1501 /*
1502 * The failover thread is counted in kp_idlethreads in
1503 * some corner cases. This is done to avoid doing more checks
1504 * when submitting a request. We account for those cases below.
1505 */
1506 if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
1507 tcnt--;
1508 ks_data->ks_idle_thrs.value.ui32 = tcnt;
1509 ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
1510 ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
1511 ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
1512 ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
1513 ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
1514 ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
1515 ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
1516
1517 return (0);
1518 }
1519
1520 /*
1521  * Allocate and initialize a kcf_dual_req, used for saving the arguments of
1522 * a dual operation or an atomic operation that has to be internally
1523 * simulated with multiple single steps.
1524 * crq determines the memory allocation flags.
1525 */
1526
1527 kcf_dual_req_t *
1528 kcf_alloc_req(crypto_call_req_t *crq)
1529 {
1530 kcf_dual_req_t *kcr;
1531
1532 kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
1533
1534 if (kcr == NULL)
1535 return (NULL);
1536
1537 	/* Copy the whole crypto_call_req struct, as it isn't persistent */
1538 if (crq != NULL)
1539 kcr->kr_callreq = *crq;
1540 else
1541 bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
1542 kcr->kr_areq = NULL;
1543 kcr->kr_saveoffset = 0;
1544 kcr->kr_savelen = 0;
1545
1546 return (kcr);
1547 }
1548
1549 /*
1550  * Callback routine for the next part of a simulated dual operation.
1551 * Schedules the next step.
1552 *
1553 * This routine can be called from interrupt context.
1554 */
1555 void
1556 kcf_next_req(void *next_req_arg, int status)
1557 {
1558 kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
1559 kcf_req_params_t *params = &(next_req->kr_params);
1560 kcf_areq_node_t *areq = next_req->kr_areq;
1561 int error = status;
1562 kcf_provider_desc_t *pd = NULL;
1563 crypto_dual_data_t *ct = NULL;
1564
1565 	/* Stop the processing if an error occurred at this step */
1566 if (error != CRYPTO_SUCCESS) {
1567 out:
1568 areq->an_reqarg = next_req->kr_callreq;
1569 KCF_AREQ_REFRELE(areq);
1570 kmem_free(next_req, sizeof (kcf_dual_req_t));
1571 areq->an_isdual = B_FALSE;
1572 kcf_aop_done(areq, error);
1573 return;
1574 }
1575
1576 switch (params->rp_opgrp) {
1577 case KCF_OG_MAC: {
1578
1579 /*
1580 * The next req is submitted with the same reqid as the
1581 * first part. The consumer only got back that reqid, and
1582 * should still be able to cancel the operation during its
1583 * second step.
1584 */
1585 kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
1586 crypto_ctx_template_t mac_tmpl;
1587 kcf_mech_entry_t *me;
1588
1589 ct = (crypto_dual_data_t *)mops->mo_data;
1590 mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
1591
1592 /* No expected recoverable failures, so no retry list */
1593 pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
1594 &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
1595 (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
1596
1597 if (pd == NULL) {
1598 error = CRYPTO_MECH_NOT_SUPPORTED;
1599 goto out;
1600 }
1601 /* Validate the MAC context template here */
1602 if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
1603 (mac_tmpl != NULL)) {
1604 kcf_ctx_template_t *ctx_mac_tmpl;
1605
1606 ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
1607
1608 if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
1609 KCF_PROV_REFRELE(pd);
1610 error = CRYPTO_OLD_CTX_TEMPLATE;
1611 goto out;
1612 }
1613 mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
1614 }
1615
1616 break;
1617 }
1618 case KCF_OG_DECRYPT: {
1619 kcf_decrypt_ops_params_t *dcrops =
1620 &(params->rp_u.decrypt_params);
1621
1622 ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
1623 /* No expected recoverable failures, so no retry list */
1624 pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
1625 NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
1626 (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
1627
1628 if (pd == NULL) {
1629 error = CRYPTO_MECH_NOT_SUPPORTED;
1630 goto out;
1631 }
1632 break;
1633 }
1634 default:
1635 break;
1636 }
1637
1638 /* The second step uses len2 and offset2 of the dual_data */
1639 next_req->kr_saveoffset = ct->dd_offset1;
1640 next_req->kr_savelen = ct->dd_len1;
1641 ct->dd_offset1 = ct->dd_offset2;
1642 ct->dd_len1 = ct->dd_len2;
1643
1644 /* preserve if the caller is restricted */
1645 if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
1646 areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
1647 } else {
1648 areq->an_reqarg.cr_flag = 0;
1649 }
1650
1651 areq->an_reqarg.cr_callback_func = kcf_last_req;
1652 areq->an_reqarg.cr_callback_arg = next_req;
1653 areq->an_isdual = B_TRUE;
1654
1655 /*
1656 * We would like to call kcf_submit_request() here. But,
1657 * that is not possible as that routine allocates a new
1658 * kcf_areq_node_t request structure, while we need to
1659 * reuse the existing request structure.
1660 */
1661 switch (pd->pd_prov_type) {
1662 case CRYPTO_SW_PROVIDER:
1663 error = common_submit_request(pd, NULL, params,
1664 KCF_RHNDL(KM_NOSLEEP));
1665 break;
1666
1667 case CRYPTO_HW_PROVIDER: {
1668 kcf_provider_desc_t *old_pd;
1669 taskq_t *taskq = pd->pd_sched_info.ks_taskq;
1670
1671 /*
1672 * Set the params for the second step in the
1673 * dual-ops.
1674 */
1675 areq->an_params = *params;
1676 old_pd = areq->an_provider;
1677 KCF_PROV_REFRELE(old_pd);
1678 KCF_PROV_REFHOLD(pd);
1679 areq->an_provider = pd;
1680
1681 /*
1682 * Note that we have to do a taskq_dispatch()
1683 * here as we may be in interrupt context.
1684 */
1685 if (taskq_dispatch(taskq, process_req_hwp, areq,
1686 TQ_NOSLEEP) == (taskqid_t)0) {
1687 error = CRYPTO_HOST_MEMORY;
1688 } else {
1689 error = CRYPTO_QUEUED;
1690 }
1691 break;
1692 }
1693 default:
1694 break;
1695 }
1696
1697 /*
1698 * We have to release the holds on the request and the provider
1699 * in all cases.
1700 */
1701 KCF_AREQ_REFRELE(areq);
1702 KCF_PROV_REFRELE(pd);
1703
1704 if (error != CRYPTO_QUEUED) {
1705 /* restore, clean up, and invoke the client's callback */
1706
1707 ct->dd_offset1 = next_req->kr_saveoffset;
1708 ct->dd_len1 = next_req->kr_savelen;
1709 areq->an_reqarg = next_req->kr_callreq;
1710 kmem_free(next_req, sizeof (kcf_dual_req_t));
1711 areq->an_isdual = B_FALSE;
1712 kcf_aop_done(areq, error);
1713 }
1714 }
1715
1716 /*
1717 * Last part of an emulated dual operation.
1718 * Clean up and restore ...
1719 */
1720 void
1721 kcf_last_req(void *last_req_arg, int status)
1722 {
1723 kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
1724
1725 kcf_req_params_t *params = &(last_req->kr_params);
1726 kcf_areq_node_t *areq = last_req->kr_areq;
1727 crypto_dual_data_t *ct = NULL;
1728
1729 switch (params->rp_opgrp) {
1730 case KCF_OG_MAC: {
1731 kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
1732
1733 ct = (crypto_dual_data_t *)mops->mo_data;
1734 break;
1735 }
1736 case KCF_OG_DECRYPT: {
1737 kcf_decrypt_ops_params_t *dcrops =
1738 &(params->rp_u.decrypt_params);
1739
1740 ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
1741 break;
1742 }
1743 default:
1744 break;
1745 }
1746 ct->dd_offset1 = last_req->kr_saveoffset;
1747 ct->dd_len1 = last_req->kr_savelen;
1748
1749 /* The submitter used kcf_last_req as its callback */
1750
1751 if (areq == NULL) {
1752 crypto_call_req_t *cr = &last_req->kr_callreq;
1753
1754 (*(cr->cr_callback_func))(cr->cr_callback_arg, status);
1755 kmem_free(last_req, sizeof (kcf_dual_req_t));
1756 return;
1757 }
1758 areq->an_reqarg = last_req->kr_callreq;
1759 KCF_AREQ_REFRELE(areq);
1760 kmem_free(last_req, sizeof (kcf_dual_req_t));
1761 areq->an_isdual = B_FALSE;
1762 kcf_aop_done(areq, status);
1763 }