/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines sit at the
 * scheduling layer, between the kernel API/ioctls and the SPI.
 */
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
kcf_global_swq_t *gswq;			/* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;		/* Thread pool of kcfd LWPs */
int kcf_maxthreads = 2;
int kcf_minthreads = 1;
int kcf_thr_multiple = 2;		/* Boot-time tunable for experimentation */
static ulong_t kcf_idlethr_timeout;
#define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */
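/*
 * Sizing sketch (illustrative): kcf_minthreads is the number of pool
 * threads kcf_disp_sw_request() tries to keep runnable, and
 * kcf_maxthreads caps the pool size. With the defaults above, a pool
 * whose two threads are both blocked in a provider cannot grow any
 * further, because it is already at kcf_maxthreads.
 */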
/* kmem caches used by the scheduler */
static kmem_cache_t *kcf_sreq_cache;
static kmem_cache_t *kcf_areq_cache;
static kmem_cache_t *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
	{ "total threads in pool",	KSTAT_DATA_UINT32 },
	{ "idle threads in pool",	KSTAT_DATA_UINT32 },
	{ "min threads in pool",	KSTAT_DATA_UINT32 },
	{ "max threads in pool",	KSTAT_DATA_UINT32 },
	{ "requests in gswq",		KSTAT_DATA_UINT32 },
	{ "max requests in gswq",	KSTAT_DATA_UINT32 },
	{ "threads for HW taskq",	KSTAT_DATA_UINT32 },
	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32 },
	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32 }
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;
static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static int kcf_enqueue(kcf_areq_node_t *);
static void kcfpool_alloc(void);
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
	crypto_ctx_t *ctx;
	kcf_context_t *kcf_ctx;

	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
	if (kcf_ctx == NULL)
		return (NULL);

	/* initialize the context for the consumer */
	kcf_ctx->kc_refcnt = 1;
	kcf_ctx->kc_req_chain_first = NULL;
	kcf_ctx->kc_req_chain_last = NULL;
	kcf_ctx->kc_secondctx = NULL;
	KCF_PROV_REFHOLD(pd);
	kcf_ctx->kc_prov_desc = pd;
	kcf_ctx->kc_sw_prov_desc = NULL;
	kcf_ctx->kc_mech = NULL;

	ctx = &kcf_ctx->kc_glbl_ctx;
	ctx->cc_provider = pd->pd_prov_handle;
	ctx->cc_session = sid;
	ctx->cc_provider_private = NULL;
	ctx->cc_framework_private = (void *)kcf_ctx;
	ctx->cc_flags = 0;
	ctx->cc_opstate = NULL;

	return (ctx);
}
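/*
 * Usage sketch (illustrative, not part of the framework API proper):
 *
 *	crypto_ctx_t *ctx = kcf_new_ctx(crq, pd, sid);
 *	if (ctx == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	... run the operation ...
 *	KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
 */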
/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has callback function and argument. Should be non NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
	kcf_areq_node_t *arptr, *areq;

	ASSERT(crq != NULL);
	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
	if (arptr == NULL)
		return (NULL);

	arptr->an_state = REQ_ALLOCATED;
	arptr->an_reqarg = *crq;
	arptr->an_params = *req;
	arptr->an_context = ictx;
	arptr->an_isdual = isdual;

	arptr->an_next = arptr->an_prev = NULL;
	KCF_PROV_REFHOLD(pd);
	arptr->an_provider = pd;
	arptr->an_tried_plist = NULL;
	arptr->an_refcnt = 1;
	arptr->an_idnext = arptr->an_idprev = NULL;

	/*
	 * Requests for context-less operations do not use the
	 * an_is_my_turn and an_ctxchain_next fields.
	 */
	if (ictx == NULL)
		return (arptr);

	KCF_CONTEXT_REFHOLD(ictx);
	/*
	 * Chain this request to the context.
	 */
	mutex_enter(&ictx->kc_in_use_lock);
	arptr->an_ctxchain_next = NULL;
	if ((areq = ictx->kc_req_chain_last) == NULL) {
		arptr->an_is_my_turn = B_TRUE;
		ictx->kc_req_chain_last =
		    ictx->kc_req_chain_first = arptr;
	} else {
		ASSERT(ictx->kc_req_chain_first != NULL);
		arptr->an_is_my_turn = B_FALSE;
		/* Insert the new request to the end of the chain. */
		areq->an_ctxchain_next = arptr;
		ictx->kc_req_chain_last = arptr;
	}
	mutex_exit(&ictx->kc_in_use_lock);

	return (arptr);
}
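/*
 * Resulting per-context chain (illustrative):
 *
 *	kc_req_chain_first --> req1 --> req2 --> req3 <-- kc_req_chain_last
 *
 * Only the head request has an_is_my_turn == B_TRUE; kcf_aop_done()
 * passes the turn down the chain as each request completes.
 */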
/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread signal it to run.
 *	- If there is no idle thread and max running threads is not
 *	  reached, signal the creator thread for more threads.
 *
 * If the two conditions above are not met, we don't need to do
 * anything. The request will be picked up by one of the
 * worker threads when it becomes available.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
	int err;
	int cnt = 0;

	if ((err = kcf_enqueue(areq)) != 0)
		return (err);

	if (kcfpool->kp_idlethreads > 0) {
		/* Signal an idle thread to run */
		mutex_enter(&gswq->gs_lock);
		cv_signal(&gswq->gs_cv);
		mutex_exit(&gswq->gs_lock);

		return (CRYPTO_QUEUED);
	}

	/*
	 * We keep the number of running threads to be at
	 * kcf_minthreads to reduce gs_lock contention.
	 */
	cnt = kcf_minthreads -
	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
	if (cnt > 0) {
		/*
		 * The following ensures the number of threads in pool
		 * does not exceed kcf_maxthreads.
		 */
		cnt = MIN(cnt, kcf_maxthreads - (int)kcfpool->kp_threads);
		if (cnt > 0) {
			/* Signal the creator thread for more threads */
			mutex_enter(&kcfpool->kp_user_lock);
			if (!kcfpool->kp_signal_create_thread) {
				kcfpool->kp_signal_create_thread = B_TRUE;
				kcfpool->kp_nthrs = cnt;
				cv_signal(&kcfpool->kp_user_cv);
			}
			mutex_exit(&kcfpool->kp_user_lock);
		}
	}

	return (CRYPTO_QUEUED);
}
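/*
 * Worked example (illustrative, using the defaults kcf_minthreads == 1
 * and kcf_maxthreads == 2): with kp_threads == 2 and
 * kp_blockedthreads == 1, cnt = 1 - (2 - 1) = 0, so no creator-thread
 * signal is sent. If both threads were blocked, cnt = 1 - (2 - 2) = 1,
 * but MIN(1, kcf_maxthreads - kp_threads) = MIN(1, 0) = 0, so no new
 * thread is created once the pool is already at kcf_maxthreads.
 */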
/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_context_t *ictx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	if (pd->pd_state == KCF_PROV_BUSY) {
		mutex_enter(&pd->pd_lock);
		while (pd->pd_state == KCF_PROV_BUSY)
			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
		mutex_exit(&pd->pd_lock);
	}

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. It is set later from kcf_aop_done() when
		 * the request before us in the chain of requests for the
		 * context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {	/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}
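/*
 * Note (illustrative): GET_REQ_TYPE() above can inspect either request
 * flavor because both kcf_sreq_node_t and kcf_areq_node_t begin with
 * the kcf_call_type_t field that the cache constructors below set to
 * CRYPTO_SYNCH and CRYPTO_ASYNCH respectively.
 */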
/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to a
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
	kcf_req_params_t *params;
	kcf_op_type_t optype;

	params = &areq->an_params;
	optype = params->rp_optype;

	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
		return (B_FALSE);

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		dops->do_mech.cm_type = dops->do_framework_mechtype;
		*mech1 = &dops->do_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
		    CRYPTO_FG_DIGEST_ATOMIC;
		break;
	}

	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
		*mech1 = &mops->mo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
		    CRYPTO_FG_MAC_ATOMIC;
		break;
	}

	case KCF_OG_SIGN: {
		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

		sops->so_mech.cm_type = sops->so_framework_mechtype;
		*mech1 = &sops->so_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_SIGN;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_SIGN_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_VERIFY: {
		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
		*mech1 = &vops->vo_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_VERIFY;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_VERIFY_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_ENCRYPT: {
		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
		*mech1 = &eops->eo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
		    CRYPTO_FG_ENCRYPT_ATOMIC;
		break;
	}

	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;

		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
		*mech1 = &dcrops->dop_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
		    CRYPTO_FG_DECRYPT_ATOMIC;
		break;
	}

	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *eops =
		    &params->rp_u.encrypt_mac_params;

		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
		*mech1 = &eops->em_encr_mech;
		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
		*mech2 = &eops->em_mac_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
		break;
	}

	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *dops =
		    &params->rp_u.mac_decrypt_params;

		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
		*mech1 = &dops->md_mac_mech;
		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
		*mech2 = &dops->md_decr_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
		break;
	}

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	default:
		break;
	}

	return (error);
}
static inline int EMPTY_TASKQ(taskq_t *tq)
{
#ifdef _KERNEL
	return (tq->tq_lowest_id == tq->tq_next_id);
#else
	return (tq->tq_task.tqent_next == &tq->tq_task || tq->tq_active == 0);
#endif
}
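/*
 * Sketch based on the #ifdef split above: the two variants reflect the
 * two taskq implementations this file can be built against. The
 * in-kernel taskq exposes tq_lowest_id/tq_next_id, while the userland
 * emulation keeps a tqent list and an active count; both expressions
 * test "no pending or running work".
 */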
/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * Special case for CRYPTO_SYNCHRONOUS providers that
			 * never return a CRYPTO_QUEUED error. We skip any
			 * request allocation and call the SPI directly.
			 */
			if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
			    EMPTY_TASKQ(taskq)) {
				KCF_PROV_IREFHOLD(pd);
				if (pd->pd_state == KCF_PROV_READY) {
					error = common_submit_request(pd, ctx,
					    params, KCF_RHNDL(KM_SLEEP));
					KCF_PROV_IREFRELE(pd);
					ASSERT(error != CRYPTO_QUEUED);
					break;
				}
				KCF_PROV_IREFRELE(pd);
			}

			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;
			sreq->sn_params = params;

			/*
			 * Note that we do not need to hold the context
			 * for synchronous case as the context will never
			 * become invalid underneath us. We do not need to hold
			 * the provider here either as the caller has a hold.
			 */
			sreq->sn_context = kcf_ctx;
			ASSERT(KCF_PROV_REFHELD(pd));
			sreq->sn_provider = pd;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value if we exceeded maxalloc. Hence
				 * the check here. Since we are allowed to wait
				 * in the synchronous case, we wait for the
				 * taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc)
					taskq_wait(taskq);

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			kmem_cache_free(kcf_sreq_cache, sreq);
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL) {
					error = CRYPTO_HOST_MEMORY;
				} else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
						/*
						 * Set the request handle. This
						 * handle is used for any
						 * crypto_cancel_req(9f) calls
						 * from the consumer. We have
						 * to do this before
						 * dispatching the request.
						 */
						crq->cr_reqid =
						    kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * There is an error processing this
					 * request. Remove the handle and
					 * release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value if we exceeded maxalloc. Hence the check
			 * here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
				/*
				 * Set the request handle. This handle is used
				 * for any crypto_cancel_req(9f) calls from the
				 * consumer. We have to do this before
				 * dispatching the request.
				 */
				crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    (taskqid_t)0) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}
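/*
 * Usage sketch (illustrative; the KCF_WRAP_*_OPS_PARAMS() argument
 * lists are defined in ops_impl.h and are elided here; my_done and
 * my_arg are hypothetical):
 *
 *	crypto_call_req_t cr;
 *	kcf_req_params_t params;
 *
 *	cr.cr_callback_func = my_done;
 *	cr.cr_callback_arg = my_arg;
 *	cr.cr_flag = CRYPTO_ALWAYS_QUEUE;
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, ...);
 *	error = kcf_submit_request(pd, NULL, &cr, &params, B_FALSE);
 *
 * Passing crq == NULL instead selects the synchronous path, which
 * blocks until the request reaches REQ_DONE.
 */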
/*
 * We're done with this framework context, so free it. Note that freeing
 * framework context (kcf_context) frees the global context (crypto_ctx).
 *
 * The provider is responsible for freeing provider private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */

	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}
/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}
/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
	kcf_areq_node_t *cur, *prev;

	/*
	 * Get the context lock, search for areq in the chain and remove it.
	 */
	ASSERT(ictx != NULL);
	mutex_enter(&ictx->kc_in_use_lock);
	prev = cur = ictx->kc_req_chain_first;

	while (cur != NULL) {
		if (cur == areq) {
			if (prev == cur) {
				if ((ictx->kc_req_chain_first =
				    cur->an_ctxchain_next) == NULL)
					ictx->kc_req_chain_last = NULL;
			} else {
				if (cur == ictx->kc_req_chain_last)
					ictx->kc_req_chain_last = prev;
				prev->an_ctxchain_next = cur->an_ctxchain_next;
			}

			break;
		}
		prev = cur;
		cur = cur->an_ctxchain_next;
	}
	mutex_exit(&ictx->kc_in_use_lock);
}
/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
	kcf_areq_node_t *nextp = node->an_next;
	kcf_areq_node_t *prevp = node->an_prev;

	if (nextp != NULL)
		nextp->an_prev = prevp;
	else
		gswq->gs_last = prevp;

	if (prevp != NULL)
		prevp->an_next = nextp;
	else
		gswq->gs_first = nextp;

	node->an_state = REQ_CANCELED;
}
/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
	kcf_areq_node_t *tnode;

	mutex_enter(&gswq->gs_lock);

	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
		mutex_exit(&gswq->gs_lock);
		return (CRYPTO_BUSY);
	}

	if (gswq->gs_last == NULL) {
		gswq->gs_first = gswq->gs_last = node;
	} else {
		ASSERT(gswq->gs_last->an_next == NULL);
		tnode = gswq->gs_last;
		tnode->an_next = node;
		gswq->gs_last = node;
		node->an_prev = tnode;
	}

	gswq->gs_njobs++;

	/* an_lock not needed here as we hold gs_lock */
	node->an_state = REQ_WAITING;

	mutex_exit(&gswq->gs_lock);

	return (0);
}
/*
 * kmem_cache_alloc constructor for sync request structure.
 */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	sreq->sn_type = CRYPTO_SYNCH;
	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	mutex_destroy(&sreq->sn_lock);
	cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	areq->an_type = CRYPTO_ASYNCH;
	areq->an_refcnt = 0;
	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	ASSERT(areq->an_refcnt == 0);
	mutex_destroy(&areq->an_lock);
	cv_destroy(&areq->an_done);
	cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	kctx->kc_refcnt = 0;
	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	ASSERT(kctx->kc_refcnt == 0);
	mutex_destroy(&kctx->kc_in_use_lock);
}
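/*
 * Design note: initializing the locks and condition variables in the
 * kmem constructors above (and tearing them down in the destructors)
 * means they are set up once per cached object rather than on every
 * kmem_cache_alloc()/kmem_cache_free() cycle, which keeps the
 * per-request allocation path cheap.
 */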
void
kcf_sched_destroy(void)
{
	int i;

	if (kcf_misc_kstat)
		kstat_delete(kcf_misc_kstat);

	if (kcfpool)
		kmem_free(kcfpool, sizeof (kcf_pool_t));

	for (i = 0; i < REQID_TABLES; i++) {
		if (kcf_reqid_table[i])
			kmem_free(kcf_reqid_table[i],
			    sizeof (kcf_reqid_table_t));
	}

	if (gswq)
		kmem_free(gswq, sizeof (kcf_global_swq_t));

	if (kcf_context_cache)
		kmem_cache_destroy(kcf_context_cache);
	if (kcf_areq_cache)
		kmem_cache_destroy(kcf_areq_cache);
	if (kcf_sreq_cache)
		kmem_cache_destroy(kcf_sreq_cache);
}
/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64, to get a slab aligned to 64-byte as well as
	 * have the objects (cache_chunksize) to be a 64-byte multiple.
	 * This helps to avoid false sharing as this is the size of the
	 * CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
		rt->rt_curid = i;
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}
/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
	mutex_enter(&sreq->sn_lock);
	sreq->sn_state = REQ_DONE;
	sreq->sn_rv = error;
	cv_signal(&sreq->sn_cv);
	mutex_exit(&sreq->sn_lock);
}
/*
 * Callback the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off of
 * the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
	kcf_op_type_t optype;
	boolean_t skip_notify = B_FALSE;
	kcf_context_t *ictx;
	kcf_areq_node_t *nextreq;

	/*
	 * Handle recoverable errors. This has to be done first
	 * before doing anything else in this routine so that
	 * we do not change the state of the request.
	 */
	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
		/*
		 * We try another provider, if one is available. Else
		 * we continue with the failure notification to the
		 * client.
		 */
		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
			return;
	}

	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_DONE;
	mutex_exit(&areq->an_lock);

	optype = (&areq->an_params)->rp_optype;
	if ((ictx = areq->an_context) != NULL) {
		/*
		 * After a request is removed from the request
		 * queue, it still stays on a chain of requests hanging
		 * off its context structure. It needs to be removed
		 * from this chain at this point.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		nextreq = areq->an_ctxchain_next;
		if (nextreq != NULL) {
			mutex_enter(&nextreq->an_lock);
			nextreq->an_is_my_turn = B_TRUE;
			cv_signal(&nextreq->an_turn_cv);
			mutex_exit(&nextreq->an_lock);
		}

		ictx->kc_req_chain_first = nextreq;
		if (nextreq == NULL)
			ictx->kc_req_chain_last = NULL;
		mutex_exit(&ictx->kc_in_use_lock);

		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
			ASSERT(nextreq == NULL);
			KCF_CONTEXT_REFRELE(ictx);
		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
			/*
			 * NOTE - We do not release the context in case of
			 * update operations. We require the consumer to free
			 * it explicitly, in case it wants to abandon an
			 * update operation. This is done as there may be
			 * mechanisms in ECB mode that can continue even if
			 * an operation on a block fails.
			 */
			KCF_CONTEXT_REFRELE(ictx);
		}
	}

	/* Deal with the internal continuation to this request first */

	if (areq->an_isdual) {
		kcf_dual_req_t *next_arg;
		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
		next_arg->kr_areq = areq;
		KCF_AREQ_REFHOLD(areq);
		areq->an_isdual = B_FALSE;

		NOTIFY_CLIENT(areq, error);
		return;
	}

	/*
	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always notify.
	 * If this flag is clear, we skip the notification provided there
	 * are no errors. We check this flag only for init or update
	 * operations. It is ignored for single, final or atomic operations.
	 */
	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
	    (error == CRYPTO_SUCCESS);

	if (!skip_notify) {
		NOTIFY_CLIENT(areq, error);
	}

	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
		kcf_reqid_delete(areq);

	KCF_AREQ_REFRELE(areq);
}
/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc(void)
{
	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
	kcfpool->kp_blockedthreads = 0;
	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_nthrs = 0;
	kcfpool->kp_user_waiting = B_FALSE;

	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);

	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
}
/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The ID is used by the caller to pass as an argument to a
 * cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
	int indx;
	crypto_req_id_t id;
	kcf_areq_node_t *headp;
	kcf_reqid_table_t *rt =
	    kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];

	mutex_enter(&rt->rt_lock);

	rt->rt_curid = id =
	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
	SET_REQID(areq, id);
	indx = REQID_HASH(id);
	headp = areq->an_idnext = rt->rt_idhash[indx];
	areq->an_idprev = NULL;
	if (headp != NULL)
		headp->an_idprev = areq;

	rt->rt_idhash[indx] = areq;
	mutex_exit(&rt->rt_lock);

	return (id);
}
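/*
 * ID layout sketch (the authoritative bit definitions are the REQID_*
 * macros in sched_impl.h): the high bit marks a valid ID, the low bits
 * carry the table index of the per-CPU table chosen above, and the
 * middle bits form a per-table counter. kcf_reqid_delete() clears the
 * ID with SET_REQID(areq, 0), which is what terminates the
 * GET_REQID(areq) == id wait loop in crypto_cancel_req().
 */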
/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
	int indx;
	kcf_areq_node_t *nextp, *prevp;
	crypto_req_id_t id = GET_REQID(areq);
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);

	nextp = areq->an_idnext;
	prevp = areq->an_idprev;
	if (nextp != NULL)
		nextp->an_idprev = prevp;
	if (prevp != NULL)
		prevp->an_idnext = nextp;
	else
		rt->rt_idhash[indx] = nextp;

	SET_REQID(areq, 0);
	cv_broadcast(&areq->an_done);

	mutex_exit(&rt->rt_lock);
}
/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if it is possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 *	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
	int indx;
	kcf_areq_node_t *areq;
	kcf_provider_desc_t *pd;
	kcf_context_t *ictx;
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);
	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
		if (GET_REQID(areq) == id) {
			/*
			 * We found the request. It is either still waiting
			 * in the framework queues or running at the provider.
			 */
			pd = areq->an_provider;
			ASSERT(pd != NULL);

			switch (pd->pd_prov_type) {
			case CRYPTO_SW_PROVIDER:
				mutex_enter(&gswq->gs_lock);
				mutex_enter(&areq->an_lock);

				/* This request can be safely canceled. */
				if (areq->an_state <= REQ_WAITING) {
					/* Remove from gswq, global software queue. */
					kcf_remove_node(areq);
					if ((ictx = areq->an_context) != NULL)
						kcf_removereq_in_ctxchain(ictx, areq);

					mutex_exit(&areq->an_lock);
					mutex_exit(&gswq->gs_lock);
					mutex_exit(&rt->rt_lock);

					/* Remove areq from hash table and free it. */
					kcf_reqid_delete(areq);
					KCF_AREQ_REFRELE(areq);
					return;
				}

				mutex_exit(&areq->an_lock);
				mutex_exit(&gswq->gs_lock);
				break;

			case CRYPTO_HW_PROVIDER:
				/*
				 * There is no interface to remove an entry
				 * once it is on the taskq. So, we do not do
				 * anything for a hardware provider.
				 */
				break;
			default:
				break;
			}

			/*
			 * The request is running. Wait for the request
			 * completion to notify us.
			 */
			KCF_AREQ_REFHOLD(areq);
			while (GET_REQID(areq) == id)
				cv_wait(&areq->an_done, &rt->rt_lock);
			KCF_AREQ_REFRELE(areq);
			break;
		}
	}

	mutex_exit(&rt->rt_lock);
}
/*
 * Cancel all asynchronous requests associated with the
 * passed in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function returns
 * since we destroy it.
 *
 * Calling context:
 *	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
	kcf_context_t *ictx;
	kcf_areq_node_t *areq;

	if (ctx == NULL)
		return;

	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

	mutex_enter(&ictx->kc_in_use_lock);

	/* Walk the chain and cancel each request */
	while ((areq = ictx->kc_req_chain_first) != NULL) {
		/*
		 * We have to drop the lock here as we may have
		 * to wait for request completion. We hold the
		 * request before dropping the lock though, so that it
		 * won't be freed underneath us.
		 */
		KCF_AREQ_REFHOLD(areq);
		mutex_exit(&ictx->kc_in_use_lock);

		crypto_cancel_req(GET_REQID(areq));
		KCF_AREQ_REFRELE(areq);

		mutex_enter(&ictx->kc_in_use_lock);
	}

	mutex_exit(&ictx->kc_in_use_lock);
	KCF_CONTEXT_REFRELE(ictx);
}
/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
	uint_t tcnt;
	kcf_stats_t *ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
	/*
	 * The failover thread is counted in kp_idlethreads in
	 * some corner cases. This is done to avoid doing more checks
	 * when submitting a request. We account for those cases below.
	 */
	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
		tcnt--;
	ks_data->ks_idle_thrs.value.ui32 = tcnt;
	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

	return (0);
}
/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments of
 * a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */
crypto_req_handle_t
kcf_alloc_req(crypto_call_req_t *crq)
{
	kcf_dual_req_t *kcr;

	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

	if (kcr == NULL)
		return (NULL);

	/* Copy the whole crypto_call_req struct, as it isn't persistent */
	if (crq != NULL)
		kcr->kr_callreq = *crq;
	else
		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
	kcr->kr_areq = NULL;
	kcr->kr_saveoffset = 0;
	kcr->kr_savelen = 0;

	return ((crypto_req_handle_t)kcr);
}
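/*
 * Flow sketch for the emulated dual operations below: the consumer's
 * crypto_call_req_t is saved in the kcf_dual_req_t allocated here,
 * kcf_next_req() runs as the callback of the first step and submits
 * the second one, and kcf_last_req() restores the saved offsets and
 * the original callback before completing the request.
 */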
/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd = NULL;
	crypto_dual_data_t *ct = NULL;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	default:
		break;
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But,
	 * that is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual operation.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	default:
		break;
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}
/*
 * Last part of an emulated dual operation.
 * Clean up and restore ...
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct = NULL;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	default:
		break;
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */

	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}
	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}