/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>

/*
 * Thread count, minalloc and maxalloc values to be used for taskq_create().
 */
const int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
const int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
const int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(const crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(const crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void delete_kstat(kcf_provider_desc_t *);

static const kcf_prov_stats_t kcf_stats_ks_data_template = {
        { "kcf_ops_total", KSTAT_DATA_UINT64 },
        { "kcf_ops_passed", KSTAT_DATA_UINT64 },
        { "kcf_ops_failed", KSTAT_DATA_UINT64 },
        { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
};

#define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
        memcpy((void *) (dst)->ops, (src)->ops, sizeof (*(src)->ops));

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

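/*
 * Illustrative sketch (not part of this file): a provider built against
 * CRYPTO_SPI_VERSION_1 typically fills in only the sub-vectors it implements
 * and leaves the rest NULL (unset fields default to NULL with a designated
 * initializer), which is why KCF_SPI_COPY_OPS() above tests each pointer
 * before copying. The identifiers sample_digest_ops and sample_ops are
 * hypothetical; the elided initializer would hold the provider's digest
 * entry points.
 *
 *      static const crypto_digest_ops_t sample_digest_ops = { ... };
 *
 *      static const crypto_ops_t sample_ops = {
 *              .co_digest_ops = &sample_digest_ops,
 *      };
 */
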
/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle. The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points. Hardware providers call this routine in their attach
 * routines. Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(const crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
        char *ks_name;

        kcf_provider_desc_t *prov_desc = NULL;
        int ret = CRYPTO_ARGUMENTS_BAD;

        if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
                return (CRYPTO_VERSION_MISMATCH);

        /*
         * Check provider type, must be software, hardware, or logical.
         */
        if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
            info->pi_provider_type != CRYPTO_SW_PROVIDER &&
            info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
                return (CRYPTO_ARGUMENTS_BAD);

        /*
         * Allocate and initialize a new provider descriptor. We also
         * hold it and release it when done.
         */
        prov_desc = kcf_alloc_provider_desc(info);
        KCF_PROV_REFHOLD(prov_desc);

        prov_desc->pd_prov_type = info->pi_provider_type;

        /* provider-private handle, opaque to KCF */
        prov_desc->pd_prov_handle = info->pi_provider_handle;

        /* copy provider description string */
        if (info->pi_provider_description != NULL) {
                /*
                 * pi_provider_description is a string that can contain
                 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
                 * INCLUDING the terminating null character. A bcopy()
                 * is necessary here as pd_description should not have
                 * a null character. See comments in kcf_alloc_provider_desc()
                 * for details on the pd_description field.
                 */
                bcopy(info->pi_provider_description, prov_desc->pd_description,
                    MIN(strlen(info->pi_provider_description),
                    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
        }

        if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
                if (info->pi_ops_vector == NULL) {
                        goto bail;
                }
                crypto_ops_t *pvec = (crypto_ops_t *)prov_desc->pd_ops_vector;
                copy_ops_vector_v1(info->pi_ops_vector, pvec);
                if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
                        copy_ops_vector_v2(info->pi_ops_vector, pvec);
                        prov_desc->pd_flags = info->pi_flags;
                }
                if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
                        copy_ops_vector_v3(info->pi_ops_vector, pvec);
                }
        }

        /* object_ops and nostore_key_ops are mutually exclusive */
        if (prov_desc->pd_ops_vector->co_object_ops &&
            prov_desc->pd_ops_vector->co_nostore_key_ops) {
                goto bail;
        }

        /* process the mechanisms supported by the provider */
        if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
                goto bail;

        /*
         * Add provider to providers tables, also sets the descriptor
         * pd_prov_id field.
         */
        if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
                undo_register_provider(prov_desc, B_FALSE);
                goto bail;
        }

        /*
         * We create a taskq only for a hardware provider. The global
         * software queue is used for software providers. We handle ordering
         * of multi-part requests in the taskq routine. So, it is safe to
         * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
         * to keep some entries cached to improve performance.
         */
        if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
                    CRYPTO_TASKQ_THREADS, minclsyspri,
                    CRYPTO_TASKQ_MIN, CRYPTO_TASKQ_MAX,
                    TASKQ_PREPOPULATE);
        else
                prov_desc->pd_sched_info.ks_taskq = NULL;

        /* no kernel session to logical providers */
        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /*
                 * Open a session for session-oriented providers. This session
                 * is used for all kernel consumers. This is fine as a provider
                 * is required to support multiple thread access to a session.
                 * We can do this only after the taskq has been created as we
                 * do a kcf_submit_request() to open the session.
                 */
                if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
                        kcf_req_params_t params;

                        KCF_WRAP_SESSION_OPS_PARAMS(&params,
                            KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
                            CRYPTO_USER, NULL, 0, prov_desc);
                        ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
                            B_FALSE);

                        if (ret != CRYPTO_SUCCESS) {
                                undo_register_provider(prov_desc, B_TRUE);
                                ret = CRYPTO_FAILED;
                                goto bail;
                        }
                }
        }

        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /*
                 * Create the kstat for this provider. There is a kstat
                 * installed for each successfully registered provider.
                 * This kstat is deleted when the provider unregisters.
                 */
                if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                        ks_name = kmem_asprintf("%s_%s",
                            "NONAME", "provider_stats");
                } else {
                        ks_name = kmem_asprintf("%s_%d_%u_%s",
                            "NONAME", 0, prov_desc->pd_prov_id,
                            "provider_stats");
                }

                prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
                    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
                    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

                if (prov_desc->pd_kstat != NULL) {
                        bcopy(&kcf_stats_ks_data_template,
                            &prov_desc->pd_ks_data,
                            sizeof (kcf_stats_ks_data_template));
                        prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
                        KCF_PROV_REFHOLD(prov_desc);
                        KCF_PROV_IREFHOLD(prov_desc);
                        prov_desc->pd_kstat->ks_private = prov_desc;
                        prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
                        kstat_install(prov_desc->pd_kstat);
                }
                kmem_strfree(ks_name);
        }

        if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                process_logical_providers(info, prov_desc);

        mutex_enter(&prov_desc->pd_lock);
        prov_desc->pd_state = KCF_PROV_READY;
        mutex_exit(&prov_desc->pd_lock);
        kcf_do_notify(prov_desc, B_TRUE);

        *handle = prov_desc->pd_kcf_prov_handle;
        ret = CRYPTO_SUCCESS;

bail:
        KCF_PROV_REFRELE(prov_desc);
        return (ret);
}

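/*
 * Illustrative sketch (not part of this file): a minimal software provider
 * would typically register from its _init() routine along these lines. The
 * identifiers sample_prov_info, sample_prov_handle and sample_init() are
 * hypothetical; sample_prov_info is assumed to be a crypto_provider_info_t
 * with pi_provider_type set to CRYPTO_SW_PROVIDER, an ops vector, and a
 * mechanism list already filled in by the provider.
 *
 *      static crypto_kcf_provider_handle_t sample_prov_handle;
 *
 *      int
 *      sample_init(void)
 *      {
 *              if (crypto_register_provider(&sample_prov_info,
 *                  &sample_prov_handle) != CRYPTO_SUCCESS)
 *                      return (EACCES);
 *              return (0);
 *      }
 */
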
/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
        uint_t mech_idx;
        kcf_provider_desc_t *desc;
        kcf_prov_state_t saved_state;

        /* lookup provider descriptor */
        if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
                return (CRYPTO_UNKNOWN_PROVIDER);

        mutex_enter(&desc->pd_lock);
        /*
         * Check if any other thread is disabling or removing
         * this provider. We return if this is the case.
         */
        if (desc->pd_state >= KCF_PROV_DISABLED) {
                mutex_exit(&desc->pd_lock);
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);
                return (CRYPTO_BUSY);
        }

        saved_state = desc->pd_state;
        desc->pd_state = KCF_PROV_REMOVED;

        if (saved_state == KCF_PROV_BUSY) {
                /*
                 * The per-provider taskq threads may be waiting. We
                 * signal them so that they can start failing requests.
                 */
                cv_broadcast(&desc->pd_resume_cv);
        }

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /*
                 * Check if this provider is currently being used.
                 * pd_irefcnt is the number of holds from the internal
                 * structures. We add one to account for the above lookup.
                 */
                if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
                        desc->pd_state = saved_state;
                        mutex_exit(&desc->pd_lock);
                        /* Release reference held by kcf_prov_tab_lookup(). */
                        KCF_PROV_REFRELE(desc);
                        /*
                         * When the caller gets this busy return value, the
                         * administrator is expected to stop the clients,
                         * which drops the holds. Any retry will then succeed.
                         */
                        return (CRYPTO_BUSY);
                }
        }
        mutex_exit(&desc->pd_lock);

        if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
                remove_provider(desc);
        }

        if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /* remove the provider from the mechanisms tables */
                for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
                    mech_idx++) {
                        kcf_remove_mech_provider(
                            desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
                }
        }

        /* remove provider from providers table */
        if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
            CRYPTO_SUCCESS) {
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);
                return (CRYPTO_UNKNOWN_PROVIDER);
        }

        delete_kstat(desc);

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);

                /*
                 * Wait till the existing requests complete.
                 */
                mutex_enter(&desc->pd_lock);
                while (desc->pd_state != KCF_PROV_FREED)
                        cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
                mutex_exit(&desc->pd_lock);
        } else {
                /*
                 * Wait until requests that have been sent to the provider
                 * complete.
                 */
                mutex_enter(&desc->pd_lock);
                while (desc->pd_irefcnt > 0)
                        cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
                mutex_exit(&desc->pd_lock);
        }

        kcf_do_notify(desc, B_FALSE);

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /*
                 * This is the only place where kcf_free_provider_desc()
                 * is called directly. KCF_PROV_REFRELE() should free the
                 * structure in all other places.
                 */
                ASSERT(desc->pd_state == KCF_PROV_FREED &&
                    desc->pd_refcnt == 0);
                kcf_free_provider_desc(desc);
        } else {
                KCF_PROV_REFRELE(desc);
        }

        return (CRYPTO_SUCCESS);
}

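/*
 * Illustrative sketch (not part of this file): the matching teardown for the
 * registration sketch above. A software provider calls this from its _fini()
 * routine and must be prepared for a CRYPTO_BUSY return while requests are
 * still outstanding. sample_fini() and sample_prov_handle are hypothetical.
 *
 *      int
 *      sample_fini(void)
 *      {
 *              if (crypto_unregister_provider(sample_prov_handle) !=
 *                  CRYPTO_SUCCESS)
 *                      return (EBUSY);
 *              return (0);
 *      }
 */
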
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *      The provider indicates that it can process more requests. A provider
 *      sends this notification if it has previously notified the framework
 *      with a CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *      The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *      The provider encountered an internal error. The framework will not
 *      send any more requests to the provider. The provider may notify
 *      with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
        kcf_provider_desc_t *pd;

        /* lookup the provider from the given handle */
        if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
                return;

        mutex_enter(&pd->pd_lock);

        if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
                goto out;

        if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
                cmn_err(CE_WARN, "crypto_provider_notification: "
                    "logical provider (%x) ignored\n", handle);
                goto out;
        }
        switch (state) {
        case CRYPTO_PROVIDER_READY:
                switch (pd->pd_state) {
                case KCF_PROV_BUSY:
                        pd->pd_state = KCF_PROV_READY;
                        /*
                         * Signal the per-provider taskq threads that they
                         * can start submitting requests.
                         */
                        cv_broadcast(&pd->pd_resume_cv);
                        break;

                case KCF_PROV_FAILED:
                        /*
                         * The provider recovered from the error. Let us
                         * use it now.
                         */
                        pd->pd_state = KCF_PROV_READY;
                        break;
                default:
                        break;
                }
                break;

        case CRYPTO_PROVIDER_BUSY:
                switch (pd->pd_state) {
                case KCF_PROV_READY:
                        pd->pd_state = KCF_PROV_BUSY;
                        break;
                default:
                        break;
                }
                break;

        case CRYPTO_PROVIDER_FAILED:
                /*
                 * We note the failure and return. The per-provider taskq
                 * threads check this flag and start failing the
                 * requests, if it is set. See process_req_hwp() for details.
                 */
                switch (pd->pd_state) {
                case KCF_PROV_READY:
                        pd->pd_state = KCF_PROV_FAILED;
                        break;

                case KCF_PROV_BUSY:
                        pd->pd_state = KCF_PROV_FAILED;
                        /*
                         * The per-provider taskq threads may be waiting. We
                         * signal them so that they can start failing requests.
                         */
                        cv_broadcast(&pd->pd_resume_cv);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
out:
        mutex_exit(&pd->pd_lock);
        KCF_PROV_REFRELE(pd);
}

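/*
 * Illustrative sketch (not part of this file): a hardware provider whose
 * internal request queue fills up can throttle the framework, and later
 * signal readiness once the queue drains, e.g. from its completion
 * interrupt. sample_prov_handle is hypothetical.
 *
 *      crypto_provider_notification(sample_prov_handle, CRYPTO_PROVIDER_BUSY);
 *              ... queue drains ...
 *      crypto_provider_notification(sample_prov_handle, CRYPTO_PROVIDER_READY);
 */
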
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
        kcf_call_type_t ctype;

        if (handle == NULL)
                return;

        if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
                kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

                if (error != CRYPTO_SUCCESS)
                        sreq->sn_provider->pd_sched_info.ks_nfails++;
                KCF_PROV_IREFRELE(sreq->sn_provider);
                kcf_sop_done(sreq, error);
        } else {
                kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

                ASSERT(ctype == CRYPTO_ASYNCH);
                if (error != CRYPTO_SUCCESS)
                        areq->an_provider->pd_sched_info.ks_nfails++;
                KCF_PROV_IREFRELE(areq->an_provider);
                kcf_aop_done(areq, error);
        }
}

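/*
 * Illustrative sketch (not part of this file): a provider that accepted an
 * asynchronous request reports the outcome from its completion path. The
 * names sample_req (the crypto_req_handle_t saved when the job was queued)
 * and sample_hw_ok (the device's result) are hypothetical.
 *
 *      crypto_op_notification(sample_req,
 *          sample_hw_ok ? CRYPTO_SUCCESS : CRYPTO_FAILED);
 */
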
/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
        return (REQHNDL2_KMFLAG(handle));
}

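/*
 * Illustrative sketch (not part of this file): inside a software provider's
 * entry point, the request handle selects the allocation behavior, e.g. when
 * allocating a scratch buffer of len bytes (req, buf and len are hypothetical
 * locals of the provider entry point).
 *
 *      buf = kmem_alloc(len, crypto_kmflag(req));
 *      if (buf == NULL)
 *              return (CRYPTO_HOST_MEMORY);
 */
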
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(const crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
        uint_t mech_idx;
        uint_t cleanup_idx;
        int err = CRYPTO_SUCCESS;
        kcf_prov_mech_desc_t *pmd;
        int desc_use_count = 0;
        int mcount = desc->pd_mech_list_count;

        if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
                if (info != NULL) {
                        ASSERT(info->pi_mechanisms != NULL);
                        bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                            sizeof (crypto_mech_info_t) * mcount);
                }
                return (CRYPTO_SUCCESS);
        }

        /*
         * Copy the mechanism list from the provider info to the provider
         * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
         * element if the provider has random_ops since we keep an internal
         * mechanism, SUN_RANDOM, in this case.
         */
        if (info != NULL) {
                if (info->pi_ops_vector->co_random_ops != NULL) {
                        crypto_mech_info_t *rand_mi;

                        /*
                         * Need the following check as it is possible to have
                         * a provider that implements just random_ops and has
                         * pi_mechanisms == NULL.
                         */
                        if (info->pi_mechanisms != NULL) {
                                bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                                    sizeof (crypto_mech_info_t) * (mcount - 1));
                        }
                        rand_mi = &desc->pd_mechanisms[mcount - 1];

                        bzero(rand_mi, sizeof (crypto_mech_info_t));
                        (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
                            CRYPTO_MAX_MECH_NAME);
                        rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
                } else {
                        ASSERT(info->pi_mechanisms != NULL);
                        bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                            sizeof (crypto_mech_info_t) * mcount);
                }
        }

        /*
         * For each mechanism supported by the provider, add the provider
         * to the corresponding KCF mechanism mech_entry chain.
         */
        for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
                crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

                if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
                    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
                        err = CRYPTO_ARGUMENTS_BAD;
                        break;
                }

                if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
                    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
                        /*
                         * We ask the provider to specify the limit
                         * per hash mechanism. But, in practice, a
                         * hardware limitation means all hash mechanisms
                         * will have the same maximum size allowed for
                         * input data. So, we make it a per provider
                         * limit to keep it simple.
                         */
                        if (mi->cm_max_input_length == 0) {
                                err = CRYPTO_ARGUMENTS_BAD;
                                break;
                        } else {
                                desc->pd_hash_limit = mi->cm_max_input_length;
                        }
                }

                if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
                    KCF_SUCCESS)
                        break;

                if (pmd == NULL)
                        continue;

                /* The provider will be used for this mechanism */
                desc_use_count++;
        }

        /*
         * Don't allow multiple software providers with disabled mechanisms
         * to register. Subsequent enabling of mechanisms will result in
         * an unsupported configuration, i.e. multiple software providers
         * per mechanism.
         */
        if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
                return (CRYPTO_ARGUMENTS_BAD);

        if (err == KCF_SUCCESS)
                return (CRYPTO_SUCCESS);

        /*
         * An error occurred while adding the mechanism, cleanup
         * and bail.
         */
        for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
                kcf_remove_mech_provider(
                    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
        }

        if (err == KCF_MECH_TAB_FULL)
                return (CRYPTO_HOST_MEMORY);

        return (CRYPTO_ARGUMENTS_BAD);
}

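/*
 * Illustrative sketch (not part of this file): for a provider that implements
 * random_ops, the descriptor's mechanism array carries one extra slot, so
 * after init_prov_mechs() it is laid out as below (mcount is the descriptor's
 * mechanism count; mechanism names other than SUN_RANDOM are hypothetical).
 *
 *      pd_mechanisms[0] .. pd_mechanisms[mcount - 2]   copied from
 *                                                      pi_mechanisms,
 *                                                      e.g. "CKM_AES_CBC"
 *      pd_mechanisms[mcount - 1]                       SUN_RANDOM, added
 *                                                      internally with
 *                                                      CRYPTO_FG_RANDOM
 */
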
/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since it is sensitive: some cryptographic
 * attacks (e.g. traffic analysis) can make use of it.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
        kcf_prov_stats_t *ks_data;
        kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;

        if (rw == KSTAT_WRITE)
                return (EACCES);

        ks_data = ksp->ks_data;

        ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
        ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
        ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
        ks_data->ps_ops_passed.value.ui64 =
            pd->pd_sched_info.ks_ndispatches -
            pd->pd_sched_info.ks_nfails -
            pd->pd_sched_info.ks_nbusy_rval;

        return (0);
}

/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
        uint_t mech_idx;

        /* remove the provider from the mechanisms tables */
        for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
            mech_idx++) {
                kcf_remove_mech_provider(
                    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
        }

        /* remove provider from providers table */
        if (remove_prov)
                (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
        /* process the mechanisms supported by the provider */
        (void) init_prov_mechs(NULL, pd);

        /*
         * Hold provider in providers table. We should not call
         * kcf_prov_tab_add_provider() here as the provider descriptor
         * is still valid which means it has an entry in the provider
         * table.
         */
        KCF_PROV_REFHOLD(pd);
        KCF_PROV_IREFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
        kcf_provider_list_t *new;

        new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
        mutex_enter(&p2->pd_lock);
        new->pl_next = p2->pd_provider_list;
        p2->pd_provider_list = new;
        KCF_PROV_IREFHOLD(p1);
        new->pl_provider = p1;
        mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
        kcf_provider_list_t *pl = NULL, **prev;

        mutex_enter(&p2->pd_lock);
        for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
            pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
                if (pl->pl_provider == p1) {
                        break;
                }
        }

        /* p1 was not found in p2's provider list */
        if (pl == NULL) {
                mutex_exit(&p2->pd_lock);
                return;
        }

        /* detach and free kcf_provider_list structure */
        KCF_PROV_IREFRELE(p1);
        *prev = pl->pl_next;
        kmem_free(pl, sizeof (*pl));
        mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(const crypto_provider_info_t *info,
    kcf_provider_desc_t *hp)
{
        kcf_provider_desc_t *lp;
        crypto_provider_id_t handle;
        int count = info->pi_logical_provider_count;
        int i;

        /* add hardware provider to each logical provider */
        for (i = 0; i < count; i++) {
                handle = info->pi_logical_providers[i];
                lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
                if (lp == NULL) {
                        continue;
                }
                add_provider_to_array(hp, lp);
                hp->pd_flags |= KCF_LPROV_MEMBER;

                /*
                 * A hardware provider has to have the provider descriptor of
                 * every logical provider it belongs to, so it can be removed
                 * from the logical provider if the hardware provider
                 * unregisters from the framework.
                 */
                add_provider_to_array(lp, hp);
                KCF_PROV_REFRELE(lp);
        }
}

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
        kcf_provider_desc_t *p;
        kcf_provider_list_t *e, *next;

        mutex_enter(&pp->pd_lock);
        for (e = pp->pd_provider_list; e != NULL; e = next) {
                p = e->pl_provider;
                remove_provider_from_array(pp, p);
                if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
                    p->pd_provider_list == NULL)
                        p->pd_flags &= ~KCF_LPROV_MEMBER;
                KCF_PROV_IREFRELE(p);
                next = e->pl_next;
                kmem_free(e, sizeof (*e));
        }
        pp->pd_provider_list = NULL;
        mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. The is_added flag indicates
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
        int i;
        crypto_notify_event_change_t ec;

        ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

        /*
         * Inform interested clients of the mechanisms becoming
         * available/unavailable. We skip this for logical providers
         * as they do not affect mechanisms.
         */
        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                ec.ec_provider_type = prov_desc->pd_prov_type;
                ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
                    CRYPTO_MECH_REMOVED;
                for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
                        (void) strlcpy(ec.ec_mech_name,
                            prov_desc->pd_mechanisms[i].cm_mech_name,
                            CRYPTO_MAX_MECH_NAME);
                        kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
                }
        }

        /*
         * Inform interested clients about the new or departing provider.
         * In case of a logical provider, we need to send the event only
         * for the logical provider and not for the underlying
         * providers which are known by the KCF_LPROV_MEMBER bit.
         */
        if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
            (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
                kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
                    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
        }
}

static void
delete_kstat(kcf_provider_desc_t *desc)
{
        /* destroy the kstat created for this provider */
        if (desc->pd_kstat != NULL) {
                kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

                /* release reference held by desc->pd_kstat->ks_private */
                ASSERT(desc == kspd);
                kstat_delete(kspd->pd_kstat);
                desc->pd_kstat = NULL;
                KCF_PROV_REFRELE(kspd);
                KCF_PROV_IREFRELE(kspd);
        }
}