// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"
#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);
struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};
static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;
static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}

	return off;
}
struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)
/* An async job request records the final tail value it used in
 * n2_request_common->offset; test to see if that offset is in
 * the range old_head, new_head, inclusive.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}

	return false;
}
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... todo ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}
static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}
static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}
static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};
static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}
struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};
static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}
struct n2_hash_ctx {
	struct crypto_ahash		*fallback_tfm;
};

#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */
struct n2_hmac_ctx {
	struct n2_hash_ctx		base;

	struct crypto_shash		*child_shash;

	int				hash_key_len;
	unsigned char			hash_key[N2_HASH_KEY_MAX];
};
struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request		fallback_req;
};
static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}
static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}
static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}
static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}
static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warn("Child shash '%s' could not be loaded!\n",
			n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}
static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}
static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_tfm_digest(child_shash, key, keylen,
					      ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}
static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}
static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}
static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}
static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}
struct n2_skcipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};
#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};
struct n2_request_context {
	struct skcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};
/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
struct n2_skcipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};
static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct n2_skcipher_alg, skcipher);
}
struct n2_skcipher_request_context {
	struct skcipher_walk	walk;
};
static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}
static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}
static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des3_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}
static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		k++;
		if (k >= keylen)
			k = 0;
	}

	return 0;
}
static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
			    struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
static int n2_compute_chunks(struct skcipher_request *req)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct skcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	err = skcipher_walk_async(walk, req);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.phys.page) +
			     walk->src.phys.offset);
		dest_paddr = (page_to_phys(walk->dst.phys.page) +
			      walk->dst.phys.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = skcipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = skcipher_walk_done(walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}
static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}
static int n2_encrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, false);
}
static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}
static int n2_encrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, false);
}
struct n2_skcipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};
[] = {
1125 /* ARC4: only ECB is supported (chaining bits ignored) */
1126 { .name
= "ecb(arc4)",
1127 .drv_name
= "ecb-arc4",
1129 .enc_type
= (ENC_TYPE_ALG_RC4_STREAM
|
1130 ENC_TYPE_CHAINING_ECB
),
1134 .setkey
= n2_arc4_setkey
,
1135 .encrypt
= n2_encrypt_ecb
,
1136 .decrypt
= n2_decrypt_ecb
,
1140 /* DES: ECB CBC and CFB are supported */
1141 { .name
= "ecb(des)",
1142 .drv_name
= "ecb-des",
1143 .block_size
= DES_BLOCK_SIZE
,
1144 .enc_type
= (ENC_TYPE_ALG_DES
|
1145 ENC_TYPE_CHAINING_ECB
),
1147 .min_keysize
= DES_KEY_SIZE
,
1148 .max_keysize
= DES_KEY_SIZE
,
1149 .setkey
= n2_des_setkey
,
1150 .encrypt
= n2_encrypt_ecb
,
1151 .decrypt
= n2_decrypt_ecb
,
1154 { .name
= "cbc(des)",
1155 .drv_name
= "cbc-des",
1156 .block_size
= DES_BLOCK_SIZE
,
1157 .enc_type
= (ENC_TYPE_ALG_DES
|
1158 ENC_TYPE_CHAINING_CBC
),
1160 .ivsize
= DES_BLOCK_SIZE
,
1161 .min_keysize
= DES_KEY_SIZE
,
1162 .max_keysize
= DES_KEY_SIZE
,
1163 .setkey
= n2_des_setkey
,
1164 .encrypt
= n2_encrypt_chaining
,
1165 .decrypt
= n2_decrypt_chaining
,
1168 { .name
= "cfb(des)",
1169 .drv_name
= "cfb-des",
1170 .block_size
= DES_BLOCK_SIZE
,
1171 .enc_type
= (ENC_TYPE_ALG_DES
|
1172 ENC_TYPE_CHAINING_CFB
),
1174 .min_keysize
= DES_KEY_SIZE
,
1175 .max_keysize
= DES_KEY_SIZE
,
1176 .setkey
= n2_des_setkey
,
1177 .encrypt
= n2_encrypt_chaining
,
1178 .decrypt
= n2_decrypt_chaining
,
1182 /* 3DES: ECB CBC and CFB are supported */
1183 { .name
= "ecb(des3_ede)",
1184 .drv_name
= "ecb-3des",
1185 .block_size
= DES_BLOCK_SIZE
,
1186 .enc_type
= (ENC_TYPE_ALG_3DES
|
1187 ENC_TYPE_CHAINING_ECB
),
1189 .min_keysize
= 3 * DES_KEY_SIZE
,
1190 .max_keysize
= 3 * DES_KEY_SIZE
,
1191 .setkey
= n2_3des_setkey
,
1192 .encrypt
= n2_encrypt_ecb
,
1193 .decrypt
= n2_decrypt_ecb
,
1196 { .name
= "cbc(des3_ede)",
1197 .drv_name
= "cbc-3des",
1198 .block_size
= DES_BLOCK_SIZE
,
1199 .enc_type
= (ENC_TYPE_ALG_3DES
|
1200 ENC_TYPE_CHAINING_CBC
),
1202 .ivsize
= DES_BLOCK_SIZE
,
1203 .min_keysize
= 3 * DES_KEY_SIZE
,
1204 .max_keysize
= 3 * DES_KEY_SIZE
,
1205 .setkey
= n2_3des_setkey
,
1206 .encrypt
= n2_encrypt_chaining
,
1207 .decrypt
= n2_decrypt_chaining
,
1210 { .name
= "cfb(des3_ede)",
1211 .drv_name
= "cfb-3des",
1212 .block_size
= DES_BLOCK_SIZE
,
1213 .enc_type
= (ENC_TYPE_ALG_3DES
|
1214 ENC_TYPE_CHAINING_CFB
),
1216 .min_keysize
= 3 * DES_KEY_SIZE
,
1217 .max_keysize
= 3 * DES_KEY_SIZE
,
1218 .setkey
= n2_3des_setkey
,
1219 .encrypt
= n2_encrypt_chaining
,
1220 .decrypt
= n2_decrypt_chaining
,
1223 /* AES: ECB CBC and CTR are supported */
1224 { .name
= "ecb(aes)",
1225 .drv_name
= "ecb-aes",
1226 .block_size
= AES_BLOCK_SIZE
,
1227 .enc_type
= (ENC_TYPE_ALG_AES128
|
1228 ENC_TYPE_CHAINING_ECB
),
1230 .min_keysize
= AES_MIN_KEY_SIZE
,
1231 .max_keysize
= AES_MAX_KEY_SIZE
,
1232 .setkey
= n2_aes_setkey
,
1233 .encrypt
= n2_encrypt_ecb
,
1234 .decrypt
= n2_decrypt_ecb
,
1237 { .name
= "cbc(aes)",
1238 .drv_name
= "cbc-aes",
1239 .block_size
= AES_BLOCK_SIZE
,
1240 .enc_type
= (ENC_TYPE_ALG_AES128
|
1241 ENC_TYPE_CHAINING_CBC
),
1243 .ivsize
= AES_BLOCK_SIZE
,
1244 .min_keysize
= AES_MIN_KEY_SIZE
,
1245 .max_keysize
= AES_MAX_KEY_SIZE
,
1246 .setkey
= n2_aes_setkey
,
1247 .encrypt
= n2_encrypt_chaining
,
1248 .decrypt
= n2_decrypt_chaining
,
1251 { .name
= "ctr(aes)",
1252 .drv_name
= "ctr-aes",
1253 .block_size
= AES_BLOCK_SIZE
,
1254 .enc_type
= (ENC_TYPE_ALG_AES128
|
1255 ENC_TYPE_CHAINING_COUNTER
),
1257 .ivsize
= AES_BLOCK_SIZE
,
1258 .min_keysize
= AES_MIN_KEY_SIZE
,
1259 .max_keysize
= AES_MAX_KEY_SIZE
,
1260 .setkey
= n2_aes_setkey
,
1261 .encrypt
= n2_encrypt_chaining
,
1262 .decrypt
= n2_encrypt_chaining
,
1267 #define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1269 static LIST_HEAD(skcipher_algs
);
struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};
static const u32 n2_md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};
static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero_message_hash,
	  .hash_init	= n2_md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero_message_hash,
	  .hash_init	= n2_sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero_message_hash,
	  .hash_init	= n2_sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero_message_hash,
	  .hash_init	= n2_sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;
static void __n2_unregister_algs(void)
{
	struct n2_skcipher_alg *skcipher, *skcipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&skcipher->skcipher);
		list_del(&skcipher->entry);
		kfree(skcipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}
*tfm
)
1366 crypto_skcipher_set_reqsize(tfm
, sizeof(struct n2_request_context
));
1370 static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl
*tmpl
)
1372 struct n2_skcipher_alg
*p
= kzalloc(sizeof(*p
), GFP_KERNEL
);
1373 struct skcipher_alg
*alg
;
1380 *alg
= tmpl
->skcipher
;
1382 snprintf(alg
->base
.cra_name
, CRYPTO_MAX_ALG_NAME
, "%s", tmpl
->name
);
1383 snprintf(alg
->base
.cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s-n2", tmpl
->drv_name
);
1384 alg
->base
.cra_priority
= N2_CRA_PRIORITY
;
1385 alg
->base
.cra_flags
= CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
|
1386 CRYPTO_ALG_ALLOCATES_MEMORY
;
1387 alg
->base
.cra_blocksize
= tmpl
->block_size
;
1388 p
->enc_type
= tmpl
->enc_type
;
1389 alg
->base
.cra_ctxsize
= sizeof(struct n2_skcipher_context
);
1390 alg
->base
.cra_module
= THIS_MODULE
;
1391 alg
->init
= n2_skcipher_init_tfm
;
1393 list_add(&p
->entry
, &skcipher_algs
);
1394 err
= crypto_register_skcipher(alg
);
1396 pr_err("%s alg registration failed\n", alg
->base
.cra_name
);
1397 list_del(&p
->entry
);
1400 pr_info("%s alg registered\n", alg
->base
.cra_name
);
static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}
static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}
static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}
/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}
static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}
static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}
static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}
static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}
static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}
static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}
static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}
/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
				dev->dev.of_node);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}
static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}
static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		printk("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}
static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}
static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}
static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}
static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}
static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}
static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}
static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}
static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		=	"n2cp",
		.of_match_table	=	n2_crypto_match,
	},
	.probe		=	n2_crypto_probe,
	.remove		=	n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		=	"ncp",
		.of_match_table	=	n2_mau_match,
	},
	.probe		=	n2_mau_probe,
	.remove		=	n2_mau_remove,
};
static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);