/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


struct virtio_crypto_ablkcipher_ctx {
        struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_tfm *tfm;

        struct virtio_crypto_sym_session_info enc_sess_info;
        struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
        struct virtio_crypto_request base;

        /* Cipher or aead */
        uint32_t type;
        struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
        struct ablkcipher_request *ablkcipher_req;
        uint8_t *iv;
        /* Encryption? */
        bool encrypt;
};

struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct crypto_alg algo;
};

/*
 * The algs_lock protects the active_devs counters in virtio_crypto_algs[]
 * below and crypto algorithm registration/unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err);

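/*
 * Completion callback for symmetric requests on a data virtqueue: map the
 * device's status code to an errno and finalize the ablkcipher request.
 */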
static void virtio_crypto_dataq_sym_callback
                (struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_sym_request *vc_sym_req =
                container_of(vc_req, struct virtio_crypto_sym_request, base);
        struct ablkcipher_request *ablk_req;
        int error;

        /* Finish the encrypt or decrypt process */
        if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
                switch (vc_req->status) {
                case VIRTIO_CRYPTO_OK:
                        error = 0;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                case VIRTIO_CRYPTO_ERR:
                        error = -EINVAL;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        error = -EBADMSG;
                        break;
                default:
                        error = -EIO;
                        break;
                }
                ablk_req = vc_sym_req->ablkcipher_req;
                virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
                                                        ablk_req, error);
        }
}

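/* Sum the byte length of all entries in a scatterlist. */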
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (total = 0; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

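/*
 * Map the key length to a virtio cipher algorithm identifier; only the AES
 * key sizes (and therefore AES-CBC) are accepted here.
 */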
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                pr_err("virtio_crypto: Unsupported key length: %d\n",
                        key_len);
                return -EINVAL;
        }
        return 0;
}

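/*
 * Create a cipher session on the device: build a CREATE_SESSION control
 * request (ctrl header and key as device-readable buffers, the input block
 * as the device-writable buffer), kick the control virtqueue and busy-wait
 * until the device has filled in the status and session id.
 */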
static int virtio_crypto_alg_ablkcipher_init_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;

        /*
         * Avoid DMA from the stack: use a dynamically-allocated buffer
         * for the key.
         */
        uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        memcpy(cipher_key, key, keylen);

        spin_lock(&vcrypto->ctrl_lock);
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Fill in the cipher parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* The device returns status and session id here */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kzfree(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * The kick traps into the hypervisor, so the request should be
         * handled immediately; busy-wait for the device's answer.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(vcrypto->input.status));
                kzfree(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kzfree(cipher_key);
        return 0;
}

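/*
 * Tear down one device session (encryption or decryption) with a
 * DESTROY_SESSION control request and busy-wait for the device's status.
 */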
static int virtio_crypto_alg_ablkcipher_close_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;

        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* The device returns its status here */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        vcrypto->ctrl_status.status,
                        destroy_session->session_id);

                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}

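/*
 * Validate the key and create both the encryption and the decryption
 * session for it; if the second one fails, the first is closed again.
 */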
static int virtio_crypto_alg_ablkcipher_init_sessions(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                goto bad_key;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                goto bad_key;

        /* Create encryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;

bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

/* Note: kernel crypto API implementation */
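/*
 * setkey binds the transform to a capable virtio crypto device on first use
 * and creates the encryption and decryption sessions; on rekeying, the
 * previously created sessions are closed before new ones are created.
 */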
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                         const uint8_t *key,
                                         unsigned int keylen)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        uint32_t alg;
        int ret;

        ret = virtio_crypto_alg_validate_key(keylen, &alg);
        if (ret)
                return ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                      virtcrypto_get_dev_node(node,
                                      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions first */
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

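/*
 * Build one symmetric data request and add it to the data virtqueue.
 * Descriptor layout: op header and IV followed by the source buffers
 * (device-readable), then the destination buffers and the status byte
 * (device-writable). Note that req->src and req->dst are indexed as flat
 * arrays below, so chained scatterlist tables are not expected here.
 */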
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                struct ablkcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        int i;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_sym_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->nbytes);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->nbytes, dst_len);

        if (unlikely(req->nbytes + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA from the stack: use a dynamically-allocated buffer
         * for the IV.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->info, ivsize);
        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_sym_req->iv = iv;
        /* Source data */
        for (i = 0; i < src_nents; i++)
                sgs[num_out++] = &req->src[i];

        /* Destination data */
        for (i = 0; i < dst_nents; i++)
                sgs[num_out + num_in++] = &req->dst[i];

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kzfree(iv);
free:
        kzfree(req_data);
        kfree(sgs);
        return err;
}

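/*
 * Encrypt/decrypt entry points: record the per-request context and hand the
 * request over to the crypto engine of the first data virtqueue.
 */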
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = true;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = false;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

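/*
 * tfm init reserves room for the per-request context and hooks the transform
 * up to the crypto engine; exit closes the device sessions and drops the
 * reference on the device.
 */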
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
        ctx->tfm = tfm;

        ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

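/* Crypto engine callback: submit one queued ablkcipher request to the device. */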
int virtio_crypto_ablkcipher_crypt_req(
        struct crypto_engine *engine, void *vreq)
{
        struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

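/*
 * Called from the data virtqueue callback: complete the request towards the
 * crypto engine and free the per-request IV copy and request resources.
 */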
static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err)
{
        crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
                                           req, err);
        kzfree(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);
}

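/* Algorithms this driver can expose to the crypto API; currently only AES-CBC. */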
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
        .algo = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "virtio_crypto_aes_cbc",
                .cra_priority = 150,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_type = &crypto_ablkcipher_type,
                .cra_init = virtio_crypto_ablkcipher_init,
                .cra_exit = virtio_crypto_ablkcipher_exit,
                .cra_u = {
                        .ablkcipher = {
                                .setkey = virtio_crypto_ablkcipher_setkey,
                                .decrypt = virtio_crypto_ablkcipher_decrypt,
                                .encrypt = virtio_crypto_ablkcipher_encrypt,
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
                        },
                },
        },
} };

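/*
 * Register every algorithm in the table that this device supports; the
 * crypto_alg itself is only registered when the first capable device appears,
 * after that only the active_devs counter is incremented.
 */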
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 0) {
                        ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
                         virtio_crypto_algs[i].algo.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

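/*
 * Drop this device's reference on each supported algorithm and unregister
 * the algorithm when the last capable device goes away.
 */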
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (virtio_crypto_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 1)
                        crypto_unregister_alg(&virtio_crypto_algs[i].algo);

                virtio_crypto_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}