/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */
33 #include <sys/zfs_context.h>
34 #include <sys/crypto/common.h>
35 #include <sys/crypto/impl.h>
36 #include <sys/crypto/sched_impl.h>
37 #include <sys/crypto/spi.h>
40 * minalloc and maxalloc values to be used for taskq_create().
42 const int crypto_taskq_threads
= CRYPTO_TASKQ_THREADS
;
43 const int crypto_taskq_minalloc
= CRYPTO_TASKQ_MIN
;
44 const int crypto_taskq_maxalloc
= CRYPTO_TASKQ_MAX
;
46 static void remove_provider(kcf_provider_desc_t
*);
47 static void process_logical_providers(const crypto_provider_info_t
*,
48 kcf_provider_desc_t
*);
49 static int init_prov_mechs(const crypto_provider_info_t
*,
50 kcf_provider_desc_t
*);
51 static int kcf_prov_kstat_update(kstat_t
*, int);
52 static void delete_kstat(kcf_provider_desc_t
*);
54 static const kcf_prov_stats_t kcf_stats_ks_data_template
= {
55 { "kcf_ops_total", KSTAT_DATA_UINT64
},
56 { "kcf_ops_passed", KSTAT_DATA_UINT64
},
57 { "kcf_ops_failed", KSTAT_DATA_UINT64
},
58 { "kcf_ops_returned_busy", KSTAT_DATA_UINT64
}
/*
 * Copy one ops sub-vector from src to dst, but only if the provider
 * supplied it (non-NULL).  Wrapped in do { } while (0) so the macro
 * behaves as a single statement: the original bare-if form would bind
 * to a following "else" at the call site (dangling-else hazard).
 */
#define	KCF_SPI_COPY_OPS(src, dst, ops) do {				\
	if ((src)->ops != NULL)						\
		memcpy((void *)(dst)->ops, (src)->ops,			\
		    sizeof (*(src)->ops));				\
} while (0)
65 * Copy an ops vector from src to dst. Used during provider registration
66 * to copy the ops vector from the provider info structure to the
67 * provider descriptor maintained by KCF.
68 * Copying the ops vector specified by the provider is needed since the
69 * framework does not require the provider info structure to be
73 copy_ops_vector_v1(const crypto_ops_t
*src_ops
, crypto_ops_t
*dst_ops
)
75 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_control_ops
);
76 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_digest_ops
);
77 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_cipher_ops
);
78 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_mac_ops
);
79 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_sign_ops
);
80 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_verify_ops
);
81 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_dual_ops
);
82 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_dual_cipher_mac_ops
);
83 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_random_ops
);
84 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_session_ops
);
85 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_object_ops
);
86 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_key_ops
);
87 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_provider_ops
);
88 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_ctx_ops
);
92 copy_ops_vector_v2(const crypto_ops_t
*src_ops
, crypto_ops_t
*dst_ops
)
94 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_mech_ops
);
98 copy_ops_vector_v3(const crypto_ops_t
*src_ops
, crypto_ops_t
*dst_ops
)
100 KCF_SPI_COPY_OPS(src_ops
, dst_ops
, co_nostore_key_ops
);
104 * This routine is used to add cryptographic providers to the KEF framework.
105 * Providers pass a crypto_provider_info structure to crypto_register_provider()
106 * and get back a handle. The crypto_provider_info structure contains a
107 * list of mechanisms supported by the provider and an ops vector containing
108 * provider entry points. Hardware providers call this routine in their attach
109 * routines. Software providers call this routine in their _init() routine.
112 crypto_register_provider(const crypto_provider_info_t
*info
,
113 crypto_kcf_provider_handle_t
*handle
)
117 kcf_provider_desc_t
*prov_desc
= NULL
;
118 int ret
= CRYPTO_ARGUMENTS_BAD
;
120 if (info
->pi_interface_version
> CRYPTO_SPI_VERSION_3
)
121 return (CRYPTO_VERSION_MISMATCH
);
124 * Check provider type, must be software, hardware, or logical.
126 if (info
->pi_provider_type
!= CRYPTO_HW_PROVIDER
&&
127 info
->pi_provider_type
!= CRYPTO_SW_PROVIDER
&&
128 info
->pi_provider_type
!= CRYPTO_LOGICAL_PROVIDER
)
129 return (CRYPTO_ARGUMENTS_BAD
);
132 * Allocate and initialize a new provider descriptor. We also
133 * hold it and release it when done.
135 prov_desc
= kcf_alloc_provider_desc(info
);
136 KCF_PROV_REFHOLD(prov_desc
);
138 prov_desc
->pd_prov_type
= info
->pi_provider_type
;
140 /* provider-private handle, opaque to KCF */
141 prov_desc
->pd_prov_handle
= info
->pi_provider_handle
;
143 /* copy provider description string */
144 if (info
->pi_provider_description
!= NULL
) {
146 * pi_provider_descriptor is a string that can contain
147 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
148 * INCLUDING the terminating null character. A bcopy()
149 * is necessary here as pd_description should not have
150 * a null character. See comments in kcf_alloc_provider_desc()
151 * for details on pd_description field.
153 bcopy(info
->pi_provider_description
, prov_desc
->pd_description
,
154 MIN(strlen(info
->pi_provider_description
),
155 (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN
));
158 if (info
->pi_provider_type
!= CRYPTO_LOGICAL_PROVIDER
) {
159 if (info
->pi_ops_vector
== NULL
) {
162 crypto_ops_t
*pvec
= (crypto_ops_t
*)prov_desc
->pd_ops_vector
;
163 copy_ops_vector_v1(info
->pi_ops_vector
, pvec
);
164 if (info
->pi_interface_version
>= CRYPTO_SPI_VERSION_2
) {
165 copy_ops_vector_v2(info
->pi_ops_vector
, pvec
);
166 prov_desc
->pd_flags
= info
->pi_flags
;
168 if (info
->pi_interface_version
== CRYPTO_SPI_VERSION_3
) {
169 copy_ops_vector_v3(info
->pi_ops_vector
, pvec
);
173 /* object_ops and nostore_key_ops are mutually exclusive */
174 if (prov_desc
->pd_ops_vector
->co_object_ops
&&
175 prov_desc
->pd_ops_vector
->co_nostore_key_ops
) {
179 /* process the mechanisms supported by the provider */
180 if ((ret
= init_prov_mechs(info
, prov_desc
)) != CRYPTO_SUCCESS
)
184 * Add provider to providers tables, also sets the descriptor
187 if ((ret
= kcf_prov_tab_add_provider(prov_desc
)) != CRYPTO_SUCCESS
) {
188 undo_register_provider(prov_desc
, B_FALSE
);
193 * We create a taskq only for a hardware provider. The global
194 * software queue is used for software providers. We handle ordering
195 * of multi-part requests in the taskq routine. So, it is safe to
196 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
197 * to keep some entries cached to improve performance.
199 if (prov_desc
->pd_prov_type
== CRYPTO_HW_PROVIDER
)
200 prov_desc
->pd_sched_info
.ks_taskq
= taskq_create("kcf_taskq",
201 CRYPTO_TASKQ_THREADS
, minclsyspri
,
202 CRYPTO_TASKQ_MIN
, CRYPTO_TASKQ_MAX
,
205 prov_desc
->pd_sched_info
.ks_taskq
= NULL
;
207 /* no kernel session to logical providers */
208 if (prov_desc
->pd_prov_type
!= CRYPTO_LOGICAL_PROVIDER
) {
210 * Open a session for session-oriented providers. This session
211 * is used for all kernel consumers. This is fine as a provider
212 * is required to support multiple thread access to a session.
213 * We can do this only after the taskq has been created as we
214 * do a kcf_submit_request() to open the session.
216 if (KCF_PROV_SESSION_OPS(prov_desc
) != NULL
) {
217 kcf_req_params_t params
;
219 KCF_WRAP_SESSION_OPS_PARAMS(¶ms
,
220 KCF_OP_SESSION_OPEN
, &prov_desc
->pd_sid
, 0,
221 CRYPTO_USER
, NULL
, 0, prov_desc
);
222 ret
= kcf_submit_request(prov_desc
, NULL
, NULL
, ¶ms
,
225 if (ret
!= CRYPTO_SUCCESS
) {
226 undo_register_provider(prov_desc
, B_TRUE
);
233 if (prov_desc
->pd_prov_type
!= CRYPTO_LOGICAL_PROVIDER
) {
235 * Create the kstat for this provider. There is a kstat
236 * installed for each successfully registered provider.
237 * This kstat is deleted, when the provider unregisters.
239 if (prov_desc
->pd_prov_type
== CRYPTO_SW_PROVIDER
) {
240 ks_name
= kmem_asprintf("%s_%s",
241 "NONAME", "provider_stats");
243 ks_name
= kmem_asprintf("%s_%d_%u_%s",
244 "NONAME", 0, prov_desc
->pd_prov_id
,
248 prov_desc
->pd_kstat
= kstat_create("kcf", 0, ks_name
, "crypto",
249 KSTAT_TYPE_NAMED
, sizeof (kcf_prov_stats_t
) /
250 sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
);
252 if (prov_desc
->pd_kstat
!= NULL
) {
253 bcopy(&kcf_stats_ks_data_template
,
254 &prov_desc
->pd_ks_data
,
255 sizeof (kcf_stats_ks_data_template
));
256 prov_desc
->pd_kstat
->ks_data
= &prov_desc
->pd_ks_data
;
257 KCF_PROV_REFHOLD(prov_desc
);
258 KCF_PROV_IREFHOLD(prov_desc
);
259 prov_desc
->pd_kstat
->ks_private
= prov_desc
;
260 prov_desc
->pd_kstat
->ks_update
= kcf_prov_kstat_update
;
261 kstat_install(prov_desc
->pd_kstat
);
263 kmem_strfree(ks_name
);
266 if (prov_desc
->pd_prov_type
== CRYPTO_HW_PROVIDER
)
267 process_logical_providers(info
, prov_desc
);
269 mutex_enter(&prov_desc
->pd_lock
);
270 prov_desc
->pd_state
= KCF_PROV_READY
;
271 mutex_exit(&prov_desc
->pd_lock
);
272 kcf_do_notify(prov_desc
, B_TRUE
);
274 *handle
= prov_desc
->pd_kcf_prov_handle
;
275 ret
= CRYPTO_SUCCESS
;
278 KCF_PROV_REFRELE(prov_desc
);
283 * This routine is used to notify the framework when a provider is being
284 * removed. Hardware providers call this routine in their detach routines.
285 * Software providers call this routine in their _fini() routine.
288 crypto_unregister_provider(crypto_kcf_provider_handle_t handle
)
291 kcf_provider_desc_t
*desc
;
292 kcf_prov_state_t saved_state
;
294 /* lookup provider descriptor */
295 if ((desc
= kcf_prov_tab_lookup((crypto_provider_id_t
)handle
)) == NULL
)
296 return (CRYPTO_UNKNOWN_PROVIDER
);
298 mutex_enter(&desc
->pd_lock
);
300 * Check if any other thread is disabling or removing
301 * this provider. We return if this is the case.
303 if (desc
->pd_state
>= KCF_PROV_DISABLED
) {
304 mutex_exit(&desc
->pd_lock
);
305 /* Release reference held by kcf_prov_tab_lookup(). */
306 KCF_PROV_REFRELE(desc
);
307 return (CRYPTO_BUSY
);
310 saved_state
= desc
->pd_state
;
311 desc
->pd_state
= KCF_PROV_REMOVED
;
313 if (saved_state
== KCF_PROV_BUSY
) {
315 * The per-provider taskq threads may be waiting. We
316 * signal them so that they can start failing requests.
318 cv_broadcast(&desc
->pd_resume_cv
);
321 if (desc
->pd_prov_type
== CRYPTO_SW_PROVIDER
) {
323 * Check if this provider is currently being used.
324 * pd_irefcnt is the number of holds from the internal
325 * structures. We add one to account for the above lookup.
327 if (desc
->pd_refcnt
> desc
->pd_irefcnt
+ 1) {
328 desc
->pd_state
= saved_state
;
329 mutex_exit(&desc
->pd_lock
);
330 /* Release reference held by kcf_prov_tab_lookup(). */
331 KCF_PROV_REFRELE(desc
);
333 * The administrator presumably will stop the clients
334 * thus removing the holds, when they get the busy
335 * return value. Any retry will succeed then.
337 return (CRYPTO_BUSY
);
340 mutex_exit(&desc
->pd_lock
);
342 if (desc
->pd_prov_type
!= CRYPTO_SW_PROVIDER
) {
343 remove_provider(desc
);
346 if (desc
->pd_prov_type
!= CRYPTO_LOGICAL_PROVIDER
) {
347 /* remove the provider from the mechanisms tables */
348 for (mech_idx
= 0; mech_idx
< desc
->pd_mech_list_count
;
350 kcf_remove_mech_provider(
351 desc
->pd_mechanisms
[mech_idx
].cm_mech_name
, desc
);
355 /* remove provider from providers table */
356 if (kcf_prov_tab_rem_provider((crypto_provider_id_t
)handle
) !=
358 /* Release reference held by kcf_prov_tab_lookup(). */
359 KCF_PROV_REFRELE(desc
);
360 return (CRYPTO_UNKNOWN_PROVIDER
);
365 if (desc
->pd_prov_type
== CRYPTO_SW_PROVIDER
) {
366 /* Release reference held by kcf_prov_tab_lookup(). */
367 KCF_PROV_REFRELE(desc
);
370 * Wait till the existing requests complete.
372 mutex_enter(&desc
->pd_lock
);
373 while (desc
->pd_state
!= KCF_PROV_FREED
)
374 cv_wait(&desc
->pd_remove_cv
, &desc
->pd_lock
);
375 mutex_exit(&desc
->pd_lock
);
378 * Wait until requests that have been sent to the provider
381 mutex_enter(&desc
->pd_lock
);
382 while (desc
->pd_irefcnt
> 0)
383 cv_wait(&desc
->pd_remove_cv
, &desc
->pd_lock
);
384 mutex_exit(&desc
->pd_lock
);
387 kcf_do_notify(desc
, B_FALSE
);
389 if (desc
->pd_prov_type
== CRYPTO_SW_PROVIDER
) {
391 * This is the only place where kcf_free_provider_desc()
392 * is called directly. KCF_PROV_REFRELE() should free the
393 * structure in all other places.
395 ASSERT(desc
->pd_state
== KCF_PROV_FREED
&&
396 desc
->pd_refcnt
== 0);
397 kcf_free_provider_desc(desc
);
399 KCF_PROV_REFRELE(desc
);
402 return (CRYPTO_SUCCESS
);
406 * This routine is used to notify the framework that the state of
407 * a cryptographic provider has changed. Valid state codes are:
409 * CRYPTO_PROVIDER_READY
410 * The provider indicates that it can process more requests. A provider
411 * will notify with this event if it previously has notified us with a
412 * CRYPTO_PROVIDER_BUSY.
414 * CRYPTO_PROVIDER_BUSY
415 * The provider can not take more requests.
417 * CRYPTO_PROVIDER_FAILED
418 * The provider encountered an internal error. The framework will not
419 * be sending any more requests to the provider. The provider may notify
420 * with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
422 * This routine can be called from user or interrupt context.
425 crypto_provider_notification(crypto_kcf_provider_handle_t handle
, uint_t state
)
427 kcf_provider_desc_t
*pd
;
429 /* lookup the provider from the given handle */
430 if ((pd
= kcf_prov_tab_lookup((crypto_provider_id_t
)handle
)) == NULL
)
433 mutex_enter(&pd
->pd_lock
);
435 if (pd
->pd_state
<= KCF_PROV_VERIFICATION_FAILED
)
438 if (pd
->pd_prov_type
== CRYPTO_LOGICAL_PROVIDER
) {
439 cmn_err(CE_WARN
, "crypto_provider_notification: "
440 "logical provider (%x) ignored\n", handle
);
444 case CRYPTO_PROVIDER_READY
:
445 switch (pd
->pd_state
) {
447 pd
->pd_state
= KCF_PROV_READY
;
449 * Signal the per-provider taskq threads that they
450 * can start submitting requests.
452 cv_broadcast(&pd
->pd_resume_cv
);
455 case KCF_PROV_FAILED
:
457 * The provider recovered from the error. Let us
460 pd
->pd_state
= KCF_PROV_READY
;
467 case CRYPTO_PROVIDER_BUSY
:
468 switch (pd
->pd_state
) {
470 pd
->pd_state
= KCF_PROV_BUSY
;
477 case CRYPTO_PROVIDER_FAILED
:
479 * We note the failure and return. The per-provider taskq
480 * threads check this flag and start failing the
481 * requests, if it is set. See process_req_hwp() for details.
483 switch (pd
->pd_state
) {
485 pd
->pd_state
= KCF_PROV_FAILED
;
489 pd
->pd_state
= KCF_PROV_FAILED
;
491 * The per-provider taskq threads may be waiting. We
492 * signal them so that they can start failing requests.
494 cv_broadcast(&pd
->pd_resume_cv
);
504 mutex_exit(&pd
->pd_lock
);
505 KCF_PROV_REFRELE(pd
);
509 * This routine is used to notify the framework the result of
510 * an asynchronous request handled by a provider. Valid error
511 * codes are the same as the CRYPTO_* errors defined in common.h.
513 * This routine can be called from user or interrupt context.
516 crypto_op_notification(crypto_req_handle_t handle
, int error
)
518 kcf_call_type_t ctype
;
523 if ((ctype
= GET_REQ_TYPE(handle
)) == CRYPTO_SYNCH
) {
524 kcf_sreq_node_t
*sreq
= (kcf_sreq_node_t
*)handle
;
526 if (error
!= CRYPTO_SUCCESS
)
527 sreq
->sn_provider
->pd_sched_info
.ks_nfails
++;
528 KCF_PROV_IREFRELE(sreq
->sn_provider
);
529 kcf_sop_done(sreq
, error
);
531 kcf_areq_node_t
*areq
= (kcf_areq_node_t
*)handle
;
533 ASSERT(ctype
== CRYPTO_ASYNCH
);
534 if (error
!= CRYPTO_SUCCESS
)
535 areq
->an_provider
->pd_sched_info
.ks_nfails
++;
536 KCF_PROV_IREFRELE(areq
->an_provider
);
537 kcf_aop_done(areq
, error
);
542 * This routine is used by software providers to determine
543 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
544 * Note that hardware providers can always use KM_SLEEP. So,
545 * they do not need to call this routine.
547 * This routine can be called from user or interrupt context.
550 crypto_kmflag(crypto_req_handle_t handle
)
552 return (REQHNDL2_KMFLAG(handle
));
556 * Process the mechanism info structures specified by the provider
557 * during registration. A NULL crypto_provider_info_t indicates
558 * an already initialized provider descriptor.
560 * Mechanisms are not added to the kernel's mechanism table if the
561 * provider is a logical provider.
563 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS if one
564 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
565 * if the table of mechanisms is full.
568 init_prov_mechs(const crypto_provider_info_t
*info
, kcf_provider_desc_t
*desc
)
572 int err
= CRYPTO_SUCCESS
;
573 kcf_prov_mech_desc_t
*pmd
;
574 int desc_use_count
= 0;
575 int mcount
= desc
->pd_mech_list_count
;
577 if (desc
->pd_prov_type
== CRYPTO_LOGICAL_PROVIDER
) {
579 ASSERT(info
->pi_mechanisms
!= NULL
);
580 bcopy(info
->pi_mechanisms
, desc
->pd_mechanisms
,
581 sizeof (crypto_mech_info_t
) * mcount
);
583 return (CRYPTO_SUCCESS
);
587 * Copy the mechanism list from the provider info to the provider
588 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
589 * element if the provider has random_ops since we keep an internal
590 * mechanism, SUN_RANDOM, in this case.
593 if (info
->pi_ops_vector
->co_random_ops
!= NULL
) {
594 crypto_mech_info_t
*rand_mi
;
597 * Need the following check as it is possible to have
598 * a provider that implements just random_ops and has
599 * pi_mechanisms == NULL.
601 if (info
->pi_mechanisms
!= NULL
) {
602 bcopy(info
->pi_mechanisms
, desc
->pd_mechanisms
,
603 sizeof (crypto_mech_info_t
) * (mcount
- 1));
605 rand_mi
= &desc
->pd_mechanisms
[mcount
- 1];
607 bzero(rand_mi
, sizeof (crypto_mech_info_t
));
608 (void) strncpy(rand_mi
->cm_mech_name
, SUN_RANDOM
,
609 CRYPTO_MAX_MECH_NAME
);
610 rand_mi
->cm_func_group_mask
= CRYPTO_FG_RANDOM
;
612 ASSERT(info
->pi_mechanisms
!= NULL
);
613 bcopy(info
->pi_mechanisms
, desc
->pd_mechanisms
,
614 sizeof (crypto_mech_info_t
) * mcount
);
619 * For each mechanism support by the provider, add the provider
620 * to the corresponding KCF mechanism mech_entry chain.
622 for (mech_idx
= 0; mech_idx
< desc
->pd_mech_list_count
; mech_idx
++) {
623 crypto_mech_info_t
*mi
= &desc
->pd_mechanisms
[mech_idx
];
625 if ((mi
->cm_mech_flags
& CRYPTO_KEYSIZE_UNIT_IN_BITS
) &&
626 (mi
->cm_mech_flags
& CRYPTO_KEYSIZE_UNIT_IN_BYTES
)) {
627 err
= CRYPTO_ARGUMENTS_BAD
;
631 if (desc
->pd_flags
& CRYPTO_HASH_NO_UPDATE
&&
632 mi
->cm_func_group_mask
& CRYPTO_FG_DIGEST
) {
634 * We ask the provider to specify the limit
635 * per hash mechanism. But, in practice, a
636 * hardware limitation means all hash mechanisms
637 * will have the same maximum size allowed for
638 * input data. So, we make it a per provider
639 * limit to keep it simple.
641 if (mi
->cm_max_input_length
== 0) {
642 err
= CRYPTO_ARGUMENTS_BAD
;
645 desc
->pd_hash_limit
= mi
->cm_max_input_length
;
649 if ((err
= kcf_add_mech_provider(mech_idx
, desc
, &pmd
)) !=
656 /* The provider will be used for this mechanism */
661 * Don't allow multiple software providers with disabled mechanisms
662 * to register. Subsequent enabling of mechanisms will result in
663 * an unsupported configuration, i.e. multiple software providers
666 if (desc_use_count
== 0 && desc
->pd_prov_type
== CRYPTO_SW_PROVIDER
)
667 return (CRYPTO_ARGUMENTS_BAD
);
669 if (err
== KCF_SUCCESS
)
670 return (CRYPTO_SUCCESS
);
673 * An error occurred while adding the mechanism, cleanup
676 for (cleanup_idx
= 0; cleanup_idx
< mech_idx
; cleanup_idx
++) {
677 kcf_remove_mech_provider(
678 desc
->pd_mechanisms
[cleanup_idx
].cm_mech_name
, desc
);
681 if (err
== KCF_MECH_TAB_FULL
)
682 return (CRYPTO_HOST_MEMORY
);
684 return (CRYPTO_ARGUMENTS_BAD
);
688 * Update routine for kstat. Only privileged users are allowed to
689 * access this information, since this information is sensitive.
690 * There are some cryptographic attacks (e.g. traffic analysis)
691 * which can use this information.
694 kcf_prov_kstat_update(kstat_t
*ksp
, int rw
)
696 kcf_prov_stats_t
*ks_data
;
697 kcf_provider_desc_t
*pd
= (kcf_provider_desc_t
*)ksp
->ks_private
;
699 if (rw
== KSTAT_WRITE
)
702 ks_data
= ksp
->ks_data
;
704 ks_data
->ps_ops_total
.value
.ui64
= pd
->pd_sched_info
.ks_ndispatches
;
705 ks_data
->ps_ops_failed
.value
.ui64
= pd
->pd_sched_info
.ks_nfails
;
706 ks_data
->ps_ops_busy_rval
.value
.ui64
= pd
->pd_sched_info
.ks_nbusy_rval
;
707 ks_data
->ps_ops_passed
.value
.ui64
=
708 pd
->pd_sched_info
.ks_ndispatches
-
709 pd
->pd_sched_info
.ks_nfails
-
710 pd
->pd_sched_info
.ks_nbusy_rval
;
717 * Utility routine called from failure paths in crypto_register_provider()
718 * and from crypto_load_soft_disabled().
721 undo_register_provider(kcf_provider_desc_t
*desc
, boolean_t remove_prov
)
725 /* remove the provider from the mechanisms tables */
726 for (mech_idx
= 0; mech_idx
< desc
->pd_mech_list_count
;
728 kcf_remove_mech_provider(
729 desc
->pd_mechanisms
[mech_idx
].cm_mech_name
, desc
);
732 /* remove provider from providers table */
734 (void) kcf_prov_tab_rem_provider(desc
->pd_prov_id
);
738 * Utility routine called from crypto_load_soft_disabled(). Callers
739 * should have done a prior undo_register_provider().
742 redo_register_provider(kcf_provider_desc_t
*pd
)
744 /* process the mechanisms supported by the provider */
745 (void) init_prov_mechs(NULL
, pd
);
748 * Hold provider in providers table. We should not call
749 * kcf_prov_tab_add_provider() here as the provider descriptor
750 * is still valid which means it has an entry in the provider
753 KCF_PROV_REFHOLD(pd
);
754 KCF_PROV_IREFHOLD(pd
);
758 * Add provider (p1) to another provider's array of providers (p2).
759 * Hardware and logical providers use this array to cross-reference
763 add_provider_to_array(kcf_provider_desc_t
*p1
, kcf_provider_desc_t
*p2
)
765 kcf_provider_list_t
*new;
767 new = kmem_alloc(sizeof (kcf_provider_list_t
), KM_SLEEP
);
768 mutex_enter(&p2
->pd_lock
);
769 new->pl_next
= p2
->pd_provider_list
;
770 p2
->pd_provider_list
= new;
771 KCF_PROV_IREFHOLD(p1
);
772 new->pl_provider
= p1
;
773 mutex_exit(&p2
->pd_lock
);
777 * Remove provider (p1) from another provider's array of providers (p2).
778 * Hardware and logical providers use this array to cross-reference
782 remove_provider_from_array(kcf_provider_desc_t
*p1
, kcf_provider_desc_t
*p2
)
785 kcf_provider_list_t
*pl
= NULL
, **prev
;
787 mutex_enter(&p2
->pd_lock
);
788 for (pl
= p2
->pd_provider_list
, prev
= &p2
->pd_provider_list
;
789 pl
!= NULL
; prev
= &pl
->pl_next
, pl
= pl
->pl_next
) {
790 if (pl
->pl_provider
== p1
) {
796 mutex_exit(&p2
->pd_lock
);
800 /* detach and free kcf_provider_list structure */
801 KCF_PROV_IREFRELE(p1
);
803 kmem_free(pl
, sizeof (*pl
));
804 mutex_exit(&p2
->pd_lock
);
808 * Convert an array of logical provider handles (crypto_provider_id)
809 * stored in a crypto_provider_info structure into an array of provider
810 * descriptors (kcf_provider_desc_t) attached to a logical provider.
813 process_logical_providers(const crypto_provider_info_t
*info
,
814 kcf_provider_desc_t
*hp
)
816 kcf_provider_desc_t
*lp
;
817 crypto_provider_id_t handle
;
818 int count
= info
->pi_logical_provider_count
;
821 /* add hardware provider to each logical provider */
822 for (i
= 0; i
< count
; i
++) {
823 handle
= info
->pi_logical_providers
[i
];
824 lp
= kcf_prov_tab_lookup((crypto_provider_id_t
)handle
);
828 add_provider_to_array(hp
, lp
);
829 hp
->pd_flags
|= KCF_LPROV_MEMBER
;
832 * A hardware provider has to have the provider descriptor of
833 * every logical provider it belongs to, so it can be removed
834 * from the logical provider if the hardware provider
835 * unregisters from the framework.
837 add_provider_to_array(lp
, hp
);
838 KCF_PROV_REFRELE(lp
);
843 * This routine removes a provider from all of the logical or
844 * hardware providers it belongs to, and frees the provider's
845 * array of pointers to providers.
848 remove_provider(kcf_provider_desc_t
*pp
)
850 kcf_provider_desc_t
*p
;
851 kcf_provider_list_t
*e
, *next
;
853 mutex_enter(&pp
->pd_lock
);
854 for (e
= pp
->pd_provider_list
; e
!= NULL
; e
= next
) {
856 remove_provider_from_array(pp
, p
);
857 if (p
->pd_prov_type
== CRYPTO_HW_PROVIDER
&&
858 p
->pd_provider_list
== NULL
)
859 p
->pd_flags
&= ~KCF_LPROV_MEMBER
;
860 KCF_PROV_IREFRELE(p
);
862 kmem_free(e
, sizeof (*e
));
864 pp
->pd_provider_list
= NULL
;
865 mutex_exit(&pp
->pd_lock
);
869 * Dispatch events as needed for a provider. is_added flag tells
870 * whether the provider is registering or unregistering.
873 kcf_do_notify(kcf_provider_desc_t
*prov_desc
, boolean_t is_added
)
876 crypto_notify_event_change_t ec
;
878 ASSERT(prov_desc
->pd_state
> KCF_PROV_VERIFICATION_FAILED
);
881 * Inform interested clients of the mechanisms becoming
882 * available/unavailable. We skip this for logical providers
883 * as they do not affect mechanisms.
885 if (prov_desc
->pd_prov_type
!= CRYPTO_LOGICAL_PROVIDER
) {
886 ec
.ec_provider_type
= prov_desc
->pd_prov_type
;
887 ec
.ec_change
= is_added
? CRYPTO_MECH_ADDED
:
889 for (i
= 0; i
< prov_desc
->pd_mech_list_count
; i
++) {
890 (void) strlcpy(ec
.ec_mech_name
,
891 prov_desc
->pd_mechanisms
[i
].cm_mech_name
,
892 CRYPTO_MAX_MECH_NAME
);
893 kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED
, &ec
);
899 * Inform interested clients about the new or departing provider.
900 * In case of a logical provider, we need to notify the event only
901 * for the logical provider and not for the underlying
902 * providers which are known by the KCF_LPROV_MEMBER bit.
904 if (prov_desc
->pd_prov_type
== CRYPTO_LOGICAL_PROVIDER
||
905 (prov_desc
->pd_flags
& KCF_LPROV_MEMBER
) == 0) {
906 kcf_walk_ntfylist(is_added
? CRYPTO_EVENT_PROVIDER_REGISTERED
:
907 CRYPTO_EVENT_PROVIDER_UNREGISTERED
, prov_desc
);
912 delete_kstat(kcf_provider_desc_t
*desc
)
914 /* destroy the kstat created for this provider */
915 if (desc
->pd_kstat
!= NULL
) {
916 kcf_provider_desc_t
*kspd
= desc
->pd_kstat
->ks_private
;
918 /* release reference held by desc->pd_kstat->ks_private */
919 ASSERT(desc
== kspd
);
920 kstat_delete(kspd
->pd_kstat
);
921 desc
->pd_kstat
= NULL
;
922 KCF_PROV_REFRELE(kspd
);
923 KCF_PROV_IREFRELE(kspd
);