/*
 * Ceph crypto key handling (net/ceph/crypto.c).
 */
#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/crypto.h>
17 * Set ->key and ->tfm. The rest of the key should be filled in before
18 * this function is called.
20 static int set_secret(struct ceph_crypto_key
*key
, void *buf
)
22 unsigned int noio_flag
;
29 case CEPH_CRYPTO_NONE
:
30 return 0; /* nothing to do */
38 key
->key
= kmemdup(buf
, key
->len
, GFP_NOIO
);
44 /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
45 noio_flag
= memalloc_noio_save();
46 key
->tfm
= crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC
);
47 memalloc_noio_restore(noio_flag
);
48 if (IS_ERR(key
->tfm
)) {
49 ret
= PTR_ERR(key
->tfm
);
54 ret
= crypto_skcipher_setkey(key
->tfm
, key
->key
, key
->len
);
61 ceph_crypto_key_destroy(key
);
65 int ceph_crypto_key_clone(struct ceph_crypto_key
*dst
,
66 const struct ceph_crypto_key
*src
)
68 memcpy(dst
, src
, sizeof(struct ceph_crypto_key
));
69 return set_secret(dst
, src
->key
);
72 int ceph_crypto_key_encode(struct ceph_crypto_key
*key
, void **p
, void *end
)
74 if (*p
+ sizeof(u16
) + sizeof(key
->created
) +
75 sizeof(u16
) + key
->len
> end
)
77 ceph_encode_16(p
, key
->type
);
78 ceph_encode_copy(p
, &key
->created
, sizeof(key
->created
));
79 ceph_encode_16(p
, key
->len
);
80 ceph_encode_copy(p
, key
->key
, key
->len
);
84 int ceph_crypto_key_decode(struct ceph_crypto_key
*key
, void **p
, void *end
)
88 ceph_decode_need(p
, end
, 2*sizeof(u16
) + sizeof(key
->created
), bad
);
89 key
->type
= ceph_decode_16(p
);
90 ceph_decode_copy(p
, &key
->created
, sizeof(key
->created
));
91 key
->len
= ceph_decode_16(p
);
92 ceph_decode_need(p
, end
, key
->len
, bad
);
93 ret
= set_secret(key
, *p
);
98 dout("failed to decode crypto key\n");
102 int ceph_crypto_key_unarmor(struct ceph_crypto_key
*key
, const char *inkey
)
104 int inlen
= strlen(inkey
);
105 int blen
= inlen
* 3 / 4;
109 dout("crypto_key_unarmor %s\n", inkey
);
110 buf
= kmalloc(blen
, GFP_NOFS
);
113 blen
= ceph_unarmor(buf
, inkey
, inkey
+inlen
);
120 ret
= ceph_crypto_key_decode(key
, &p
, p
+ blen
);
124 dout("crypto_key_unarmor key %p type %d len %d\n", key
,
125 key
->type
, key
->len
);
129 void ceph_crypto_key_destroy(struct ceph_crypto_key
*key
)
134 crypto_free_skcipher(key
->tfm
);
139 static const u8
*aes_iv
= (u8
*)CEPH_AES_IV
;
142 * Should be used for buffers allocated with ceph_kvmalloc().
143 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
144 * in-buffer (msg front).
146 * Dispose of @sgt with teardown_sgtable().
148 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
149 * in cases where a single sg is sufficient. No attempt to reduce the
150 * number of sgs by squeezing physically contiguous pages together is
151 * made though, for simplicity.
153 static int setup_sgtable(struct sg_table
*sgt
, struct scatterlist
*prealloc_sg
,
154 const void *buf
, unsigned int buf_len
)
156 struct scatterlist
*sg
;
157 const bool is_vmalloc
= is_vmalloc_addr(buf
);
158 unsigned int off
= offset_in_page(buf
);
159 unsigned int chunk_cnt
= 1;
160 unsigned int chunk_len
= PAGE_ALIGN(off
+ buf_len
);
165 memset(sgt
, 0, sizeof(*sgt
));
170 chunk_cnt
= chunk_len
>> PAGE_SHIFT
;
171 chunk_len
= PAGE_SIZE
;
175 ret
= sg_alloc_table(sgt
, chunk_cnt
, GFP_NOFS
);
179 WARN_ON(chunk_cnt
!= 1);
180 sg_init_table(prealloc_sg
, 1);
181 sgt
->sgl
= prealloc_sg
;
182 sgt
->nents
= sgt
->orig_nents
= 1;
185 for_each_sg(sgt
->sgl
, sg
, sgt
->orig_nents
, i
) {
187 unsigned int len
= min(chunk_len
- off
, buf_len
);
190 page
= vmalloc_to_page(buf
);
192 page
= virt_to_page(buf
);
194 sg_set_page(sg
, page
, len
, off
);
200 WARN_ON(buf_len
!= 0);
205 static void teardown_sgtable(struct sg_table
*sgt
)
207 if (sgt
->orig_nents
> 1)
211 static int ceph_aes_crypt(const struct ceph_crypto_key
*key
, bool encrypt
,
212 void *buf
, int buf_len
, int in_len
, int *pout_len
)
214 SKCIPHER_REQUEST_ON_STACK(req
, key
->tfm
);
216 struct scatterlist prealloc_sg
;
217 char iv
[AES_BLOCK_SIZE
] __aligned(8);
218 int pad_byte
= AES_BLOCK_SIZE
- (in_len
& (AES_BLOCK_SIZE
- 1));
219 int crypt_len
= encrypt
? in_len
+ pad_byte
: in_len
;
222 WARN_ON(crypt_len
> buf_len
);
224 memset(buf
+ in_len
, pad_byte
, pad_byte
);
225 ret
= setup_sgtable(&sgt
, &prealloc_sg
, buf
, crypt_len
);
229 memcpy(iv
, aes_iv
, AES_BLOCK_SIZE
);
230 skcipher_request_set_tfm(req
, key
->tfm
);
231 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
232 skcipher_request_set_crypt(req
, sgt
.sgl
, sgt
.sgl
, crypt_len
, iv
);
235 print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
236 key->key, key->len, 1);
237 print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
241 ret
= crypto_skcipher_encrypt(req
);
243 ret
= crypto_skcipher_decrypt(req
);
244 skcipher_request_zero(req
);
246 pr_err("%s %scrypt failed: %d\n", __func__
,
247 encrypt
? "en" : "de", ret
);
251 print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
256 *pout_len
= crypt_len
;
258 pad_byte
= *(char *)(buf
+ in_len
- 1);
259 if (pad_byte
> 0 && pad_byte
<= AES_BLOCK_SIZE
&&
260 in_len
>= pad_byte
) {
261 *pout_len
= in_len
- pad_byte
;
263 pr_err("%s got bad padding %d on in_len %d\n",
264 __func__
, pad_byte
, in_len
);
271 teardown_sgtable(&sgt
);
275 int ceph_crypt(const struct ceph_crypto_key
*key
, bool encrypt
,
276 void *buf
, int buf_len
, int in_len
, int *pout_len
)
279 case CEPH_CRYPTO_NONE
:
282 case CEPH_CRYPTO_AES
:
283 return ceph_aes_crypt(key
, encrypt
, buf
, buf_len
, in_len
,
290 static int ceph_key_preparse(struct key_preparsed_payload
*prep
)
292 struct ceph_crypto_key
*ckey
;
293 size_t datalen
= prep
->datalen
;
298 if (datalen
<= 0 || datalen
> 32767 || !prep
->data
)
302 ckey
= kmalloc(sizeof(*ckey
), GFP_KERNEL
);
306 /* TODO ceph_crypto_key_decode should really take const input */
307 p
= (void *)prep
->data
;
308 ret
= ceph_crypto_key_decode(ckey
, &p
, (char*)prep
->data
+datalen
);
312 prep
->payload
.data
[0] = ckey
;
313 prep
->quotalen
= datalen
;
322 static void ceph_key_free_preparse(struct key_preparsed_payload
*prep
)
324 struct ceph_crypto_key
*ckey
= prep
->payload
.data
[0];
325 ceph_crypto_key_destroy(ckey
);
329 static void ceph_key_destroy(struct key
*key
)
331 struct ceph_crypto_key
*ckey
= key
->payload
.data
[0];
333 ceph_crypto_key_destroy(ckey
);
337 struct key_type key_type_ceph
= {
339 .preparse
= ceph_key_preparse
,
340 .free_preparse
= ceph_key_free_preparse
,
341 .instantiate
= generic_key_instantiate
,
342 .destroy
= ceph_key_destroy
,
345 int ceph_crypto_init(void) {
346 return register_key_type(&key_type_ceph
);
349 void ceph_crypto_shutdown(void) {
350 unregister_key_type(&key_type_ceph
);