drivers/crypto/qat/qat_common/qat_algs.c
1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/hmac.h>
55 #include <crypto/algapi.h>
56 #include <crypto/authenc.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
66 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
67         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
68                                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
69                                        ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
72         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
73                                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74                                        ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static DEFINE_MUTEX(algs_lock);
77 static unsigned int active_devs;
78
79 struct qat_alg_buf {
80         uint32_t len;
81         uint32_t resrvd;
82         uint64_t addr;
83 } __packed;
84
85 struct qat_alg_buf_list {
86         uint64_t resrvd;
87         uint32_t num_bufs;
88         uint32_t num_mapped_bufs;
89         struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
91
92 /* Common content descriptor */
93 struct qat_alg_cd {
94         union {
95                 struct qat_enc { /* Encrypt content desc */
96                         struct icp_qat_hw_cipher_algo_blk cipher;
97                         struct icp_qat_hw_auth_algo_blk hash;
98                 } qat_enc_cd;
99                 struct qat_dec { /* Decrypt content desc */
100                         struct icp_qat_hw_auth_algo_blk hash;
101                         struct icp_qat_hw_cipher_algo_blk cipher;
102                 } qat_dec_cd;
103         };
104 } __aligned(64);
105
106 struct qat_alg_aead_ctx {
107         struct qat_alg_cd *enc_cd;
108         struct qat_alg_cd *dec_cd;
109         dma_addr_t enc_cd_paddr;
110         dma_addr_t dec_cd_paddr;
111         struct icp_qat_fw_la_bulk_req enc_fw_req;
112         struct icp_qat_fw_la_bulk_req dec_fw_req;
113         struct crypto_shash *hash_tfm;
114         enum icp_qat_hw_auth_algo qat_hash_alg;
115         struct qat_crypto_instance *inst;
116         union {
117                 struct sha1_state sha1;
118                 struct sha256_state sha256;
119                 struct sha512_state sha512;
120         };
121         char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
122         char opad[SHA512_BLOCK_SIZE];
123 };
124
125 struct qat_alg_ablkcipher_ctx {
126         struct icp_qat_hw_cipher_algo_blk *enc_cd;
127         struct icp_qat_hw_cipher_algo_blk *dec_cd;
128         dma_addr_t enc_cd_paddr;
129         dma_addr_t dec_cd_paddr;
130         struct icp_qat_fw_la_bulk_req enc_fw_req;
131         struct icp_qat_fw_la_bulk_req dec_fw_req;
132         struct qat_crypto_instance *inst;
133         struct crypto_tfm *tfm;
134         spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
135 };
136
137 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
138 {
139         switch (qat_hash_alg) {
140         case ICP_QAT_HW_AUTH_ALGO_SHA1:
141                 return ICP_QAT_HW_SHA1_STATE1_SZ;
142         case ICP_QAT_HW_AUTH_ALGO_SHA256:
143                 return ICP_QAT_HW_SHA256_STATE1_SZ;
144         case ICP_QAT_HW_AUTH_ALGO_SHA512:
145                 return ICP_QAT_HW_SHA512_STATE1_SZ;
146         default:
147                 return -EFAULT;
148         }
149         return -EFAULT;
150 }
151
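/*
 * Precompute the inner and outer HMAC states for the session.  The auth
 * key is turned into ipad/opad blocks, each block is fed through the
 * software shash, and the exported partial states are written (converted
 * to big endian) into state1 of the hardware auth block: the inner state
 * first, the outer state at the next 8-byte aligned offset.
 */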
152 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
153                                   struct qat_alg_aead_ctx *ctx,
154                                   const uint8_t *auth_key,
155                                   unsigned int auth_keylen)
156 {
157         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
158         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
159         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
160         __be32 *hash_state_out;
161         __be64 *hash512_state_out;
162         int i, offset;
163
164         memset(ctx->ipad, 0, block_size);
165         memset(ctx->opad, 0, block_size);
166         shash->tfm = ctx->hash_tfm;
167         shash->flags = 0x0;
168
169         if (auth_keylen > block_size) {
170                 int ret = crypto_shash_digest(shash, auth_key,
171                                               auth_keylen, ctx->ipad);
172                 if (ret)
173                         return ret;
174
175                 memcpy(ctx->opad, ctx->ipad, digest_size);
176         } else {
177                 memcpy(ctx->ipad, auth_key, auth_keylen);
178                 memcpy(ctx->opad, auth_key, auth_keylen);
179         }
180
181         for (i = 0; i < block_size; i++) {
182                 char *ipad_ptr = ctx->ipad + i;
183                 char *opad_ptr = ctx->opad + i;
184                 *ipad_ptr ^= HMAC_IPAD_VALUE;
185                 *opad_ptr ^= HMAC_OPAD_VALUE;
186         }
187
188         if (crypto_shash_init(shash))
189                 return -EFAULT;
190
191         if (crypto_shash_update(shash, ctx->ipad, block_size))
192                 return -EFAULT;
193
194         hash_state_out = (__be32 *)hash->sha.state1;
195         hash512_state_out = (__be64 *)hash_state_out;
196
197         switch (ctx->qat_hash_alg) {
198         case ICP_QAT_HW_AUTH_ALGO_SHA1:
199                 if (crypto_shash_export(shash, &ctx->sha1))
200                         return -EFAULT;
201                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
202                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
203                 break;
204         case ICP_QAT_HW_AUTH_ALGO_SHA256:
205                 if (crypto_shash_export(shash, &ctx->sha256))
206                         return -EFAULT;
207                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
209                 break;
210         case ICP_QAT_HW_AUTH_ALGO_SHA512:
211                 if (crypto_shash_export(shash, &ctx->sha512))
212                         return -EFAULT;
213                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
214                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
215                 break;
216         default:
217                 return -EFAULT;
218         }
219
220         if (crypto_shash_init(shash))
221                 return -EFAULT;
222
223         if (crypto_shash_update(shash, ctx->opad, block_size))
224                 return -EFAULT;
225
226         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
227         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
228         hash512_state_out = (__be64 *)hash_state_out;
229
230         switch (ctx->qat_hash_alg) {
231         case ICP_QAT_HW_AUTH_ALGO_SHA1:
232                 if (crypto_shash_export(shash, &ctx->sha1))
233                         return -EFAULT;
234                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
235                         *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
236                 break;
237         case ICP_QAT_HW_AUTH_ALGO_SHA256:
238                 if (crypto_shash_export(shash, &ctx->sha256))
239                         return -EFAULT;
240                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
241                         *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
242                 break;
243         case ICP_QAT_HW_AUTH_ALGO_SHA512:
244                 if (crypto_shash_export(shash, &ctx->sha512))
245                         return -EFAULT;
246                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
247                         *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
248                 break;
249         default:
250                 return -EFAULT;
251         }
252         memzero_explicit(ctx->ipad, block_size);
253         memzero_explicit(ctx->opad, block_size);
254         return 0;
255 }
256
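/*
 * Fill in the request header fields shared by all LA requests: valid
 * header flags, LA service type, 64-bit content descriptor address, SGL
 * pointer type, no partial processing, 16-byte IV field, no protocol and
 * no state update.
 */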
257 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
258 {
259         header->hdr_flags =
260                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
261         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
262         header->comn_req_flags =
263                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
264                                             QAT_COMN_PTR_TYPE_SGL);
265         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
266                                   ICP_QAT_FW_LA_PARTIAL_NONE);
267         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
268                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
269         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
270                                 ICP_QAT_FW_LA_NO_PROTO);
271         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
272                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
273 }
274
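/*
 * Build the encrypt-direction content descriptor and request template for
 * an AEAD session: cipher block followed by the auth block (CIPHER_HASH
 * order), HMAC precomputes filled in, and the slice chain set to
 * cipher -> auth -> DRAM write.
 */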
275 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
276                                          int alg,
277                                          struct crypto_authenc_keys *keys,
278                                          int mode)
279 {
280         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
281         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
282         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
283         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
284         struct icp_qat_hw_auth_algo_blk *hash =
285                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
286                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
287         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
288         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
289         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
290         void *ptr = &req_tmpl->cd_ctrl;
291         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
292         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
293
294         /* CD setup */
295         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
296         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
297         hash->sha.inner_setup.auth_config.config =
298                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
299                                              ctx->qat_hash_alg, digestsize);
300         hash->sha.inner_setup.auth_counter.counter =
301                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
302
303         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
304                 return -EFAULT;
305
306         /* Request setup */
307         qat_alg_init_common_hdr(header);
308         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
309         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
310                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
311         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
312                                    ICP_QAT_FW_LA_RET_AUTH_RES);
313         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
314                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
315         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
316         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
317
318         /* Cipher CD config setup */
319         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
320         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
321         cipher_cd_ctrl->cipher_cfg_offset = 0;
322         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
323         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
324         /* Auth CD config setup */
325         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
326         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
327         hash_cd_ctrl->inner_res_sz = digestsize;
328         hash_cd_ctrl->final_sz = digestsize;
329
330         switch (ctx->qat_hash_alg) {
331         case ICP_QAT_HW_AUTH_ALGO_SHA1:
332                 hash_cd_ctrl->inner_state1_sz =
333                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
334                 hash_cd_ctrl->inner_state2_sz =
335                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
336                 break;
337         case ICP_QAT_HW_AUTH_ALGO_SHA256:
338                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
339                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
340                 break;
341         case ICP_QAT_HW_AUTH_ALGO_SHA512:
342                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
343                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
344                 break;
345         default:
346                 break;
347         }
348         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
349                         ((sizeof(struct icp_qat_hw_auth_setup) +
350                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
351         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
352         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
353         return 0;
354 }
355
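/*
 * Build the decrypt-direction content descriptor and request template:
 * auth block first, then the cipher block (HASH_CIPHER order).  The
 * digest is not returned; instead the hardware is asked to compare it
 * against the value found in the input buffer.
 */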
356 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
357                                          int alg,
358                                          struct crypto_authenc_keys *keys,
359                                          int mode)
360 {
361         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
362         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
363         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
364         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
365         struct icp_qat_hw_cipher_algo_blk *cipher =
366                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
367                 sizeof(struct icp_qat_hw_auth_setup) +
368                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
369         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
370         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
371         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
372         void *ptr = &req_tmpl->cd_ctrl;
373         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
374         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
375         struct icp_qat_fw_la_auth_req_params *auth_param =
376                 (struct icp_qat_fw_la_auth_req_params *)
377                 ((char *)&req_tmpl->serv_specif_rqpars +
378                 sizeof(struct icp_qat_fw_la_cipher_req_params));
379
380         /* CD setup */
381         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
382         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
383         hash->sha.inner_setup.auth_config.config =
384                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
385                                              ctx->qat_hash_alg,
386                                              digestsize);
387         hash->sha.inner_setup.auth_counter.counter =
388                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
389
390         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
391                 return -EFAULT;
392
393         /* Request setup */
394         qat_alg_init_common_hdr(header);
395         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
396         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
397                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
398         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
399                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
400         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
401                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
402         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
403         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
404
405         /* Cipher CD config setup */
406         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
407         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
408         cipher_cd_ctrl->cipher_cfg_offset =
409                 (sizeof(struct icp_qat_hw_auth_setup) +
410                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
411         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
412         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
413
414         /* Auth CD config setup */
415         hash_cd_ctrl->hash_cfg_offset = 0;
416         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
417         hash_cd_ctrl->inner_res_sz = digestsize;
418         hash_cd_ctrl->final_sz = digestsize;
419
420         switch (ctx->qat_hash_alg) {
421         case ICP_QAT_HW_AUTH_ALGO_SHA1:
422                 hash_cd_ctrl->inner_state1_sz =
423                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
424                 hash_cd_ctrl->inner_state2_sz =
425                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
426                 break;
427         case ICP_QAT_HW_AUTH_ALGO_SHA256:
428                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
429                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
430                 break;
431         case ICP_QAT_HW_AUTH_ALGO_SHA512:
432                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
433                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
434                 break;
435         default:
436                 break;
437         }
438
439         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
440                         ((sizeof(struct icp_qat_hw_auth_setup) +
441                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
442         auth_param->auth_res_sz = digestsize;
443         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
444         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
445         return 0;
446 }
447
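/*
 * Common part of the ablkcipher setup: copy the key into the content
 * descriptor and build a plain CIPHER request template with a single
 * cipher slice followed by a DRAM write.
 */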
448 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
449                                         struct icp_qat_fw_la_bulk_req *req,
450                                         struct icp_qat_hw_cipher_algo_blk *cd,
451                                         const uint8_t *key, unsigned int keylen)
452 {
453         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
454         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
455         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
456
457         memcpy(cd->aes.key, key, keylen);
458         qat_alg_init_common_hdr(header);
459         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
460         cd_pars->u.s.content_desc_params_sz =
461                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
462         /* Cipher CD config setup */
463         cd_ctrl->cipher_key_sz = keylen >> 3;
464         cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
465         cd_ctrl->cipher_cfg_offset = 0;
466         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
467         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
468 }
469
470 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
471                                         int alg, const uint8_t *key,
472                                         unsigned int keylen, int mode)
473 {
474         struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
475         struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
476         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
477
478         qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
479         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
480         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
481 }
482
483 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
484                                         int alg, const uint8_t *key,
485                                         unsigned int keylen, int mode)
486 {
487         struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
488         struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
489         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
490
491         qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
492         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
493
494         if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
495                 dec_cd->aes.cipher_config.val =
496                                         QAT_AES_HW_CONFIG_DEC(alg, mode);
497         else
498                 dec_cd->aes.cipher_config.val =
499                                         QAT_AES_HW_CONFIG_ENC(alg, mode);
500 }
501
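/*
 * Map an AES key length onto the hardware algorithm id.  XTS keys carry
 * two AES keys, so only the doubled 128- and 256-bit sizes are accepted
 * in that mode.
 */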
502 static int qat_alg_validate_key(int key_len, int *alg, int mode)
503 {
504         if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
505                 switch (key_len) {
506                 case AES_KEYSIZE_128:
507                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
508                         break;
509                 case AES_KEYSIZE_192:
510                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
511                         break;
512                 case AES_KEYSIZE_256:
513                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
514                         break;
515                 default:
516                         return -EINVAL;
517                 }
518         } else {
519                 switch (key_len) {
520                 case AES_KEYSIZE_128 << 1:
521                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
522                         break;
523                 case AES_KEYSIZE_256 << 1:
524                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
525                         break;
526                 default:
527                         return -EINVAL;
528                 }
529         }
530         return 0;
531 }
532
533 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
534                                       unsigned int keylen,  int mode)
535 {
536         struct crypto_authenc_keys keys;
537         int alg;
538
539         if (crypto_authenc_extractkeys(&keys, key, keylen))
540                 goto bad_key;
541
542         if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
543                 goto bad_key;
544
545         if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
546                 goto error;
547
548         if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
549                 goto error;
550
551         memzero_explicit(&keys, sizeof(keys));
552         return 0;
553 bad_key:
554         crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
555         memzero_explicit(&keys, sizeof(keys));
556         return -EINVAL;
557 error:
558         memzero_explicit(&keys, sizeof(keys));
559         return -EFAULT;
560 }
561
562 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
563                                             const uint8_t *key,
564                                             unsigned int keylen,
565                                             int mode)
566 {
567         int alg;
568
569         if (qat_alg_validate_key(keylen, &alg, mode))
570                 goto bad_key;
571
572         qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
573         qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
574         return 0;
575 bad_key:
576         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
577         return -EINVAL;
578 }
579
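/*
 * (Re)key an AEAD tfm.  On first use a crypto instance is picked for the
 * current node and DMA-coherent encrypt/decrypt content descriptors are
 * allocated; on rekey the existing descriptors and request templates are
 * cleared.  Both sessions are then built in CBC mode.
 */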
580 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
581                                unsigned int keylen)
582 {
583         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
584         struct device *dev;
585
586         if (ctx->enc_cd) {
587                 /* rekeying */
588                 dev = &GET_DEV(ctx->inst->accel_dev);
589                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
590                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
591                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
592                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
593         } else {
594                 /* new key */
595                 int node = get_current_node();
596                 struct qat_crypto_instance *inst =
597                                 qat_crypto_get_instance_node(node);
598                 if (!inst) {
599                         return -EINVAL;
600                 }
601
602                 dev = &GET_DEV(inst->accel_dev);
603                 ctx->inst = inst;
604                 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
605                                                  &ctx->enc_cd_paddr,
606                                                  GFP_ATOMIC);
607                 if (!ctx->enc_cd) {
608                         return -ENOMEM;
609                 }
610                 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
611                                                  &ctx->dec_cd_paddr,
612                                                  GFP_ATOMIC);
613                 if (!ctx->dec_cd) {
614                         goto out_free_enc;
615                 }
616         }
617         if (qat_alg_aead_init_sessions(tfm, key, keylen,
618                                        ICP_QAT_HW_CIPHER_CBC_MODE))
619                 goto out_free_all;
620
621         return 0;
622
623 out_free_all:
624         memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
625         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
626                           ctx->dec_cd, ctx->dec_cd_paddr);
627         ctx->dec_cd = NULL;
628 out_free_enc:
629         memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
630         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
631                           ctx->enc_cd, ctx->enc_cd_paddr);
632         ctx->enc_cd = NULL;
633         return -ENOMEM;
634 }
635
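/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data buffer and the flat
 * buffer-list descriptors, then free them.  For in-place operations the
 * output list is the input list and is only torn down once.
 */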
636 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
637                               struct qat_crypto_request *qat_req)
638 {
639         struct device *dev = &GET_DEV(inst->accel_dev);
640         struct qat_alg_buf_list *bl = qat_req->buf.bl;
641         struct qat_alg_buf_list *blout = qat_req->buf.blout;
642         dma_addr_t blp = qat_req->buf.blp;
643         dma_addr_t blpout = qat_req->buf.bloutp;
644         size_t sz = qat_req->buf.sz;
645         size_t sz_out = qat_req->buf.sz_out;
646         int i;
647
648         for (i = 0; i < bl->num_bufs; i++)
649                 dma_unmap_single(dev, bl->bufers[i].addr,
650                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
651
652         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
653         kfree(bl);
654         if (blp != blpout) {
655                 /* For out-of-place operations, unmap only the mapped data buffers */
656                 int bufless = blout->num_bufs - blout->num_mapped_bufs;
657
658                 for (i = bufless; i < blout->num_bufs; i++) {
659                         dma_unmap_single(dev, blout->bufers[i].addr,
660                                          blout->bufers[i].len,
661                                          DMA_BIDIRECTIONAL);
662                 }
663                 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
664                 kfree(blout);
665         }
666 }
667
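/*
 * Translate the source (and, for out-of-place requests, destination)
 * scatterlists into flat qat_alg_buf_list descriptors the firmware can
 * walk, DMA-mapping each scatterlist entry as well as the descriptors
 * themselves.  Zero-length entries are skipped.
 */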
668 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
669                                struct scatterlist *sgl,
670                                struct scatterlist *sglout,
671                                struct qat_crypto_request *qat_req)
672 {
673         struct device *dev = &GET_DEV(inst->accel_dev);
674         int i, sg_nctr = 0;
675         int n = sg_nents(sgl);
676         struct qat_alg_buf_list *bufl;
677         struct qat_alg_buf_list *buflout = NULL;
678         dma_addr_t blp;
679         dma_addr_t bloutp = 0;
680         struct scatterlist *sg;
681         size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
682                         ((1 + n) * sizeof(struct qat_alg_buf));
683
684         if (unlikely(!n))
685                 return -EINVAL;
686
687         bufl = kzalloc_node(sz, GFP_ATOMIC,
688                             dev_to_node(&GET_DEV(inst->accel_dev)));
689         if (unlikely(!bufl))
690                 return -ENOMEM;
691
692         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
693         if (unlikely(dma_mapping_error(dev, blp)))
694                 goto err_in;
695
696         for_each_sg(sgl, sg, n, i) {
697                 int y = sg_nctr;
698
699                 if (!sg->length)
700                         continue;
701
702                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
703                                                       sg->length,
704                                                       DMA_BIDIRECTIONAL);
705                 bufl->bufers[y].len = sg->length;
706                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
707                         goto err_in;
708                 sg_nctr++;
709         }
710         bufl->num_bufs = sg_nctr;
711         qat_req->buf.bl = bufl;
712         qat_req->buf.blp = blp;
713         qat_req->buf.sz = sz;
714         /* Handle out of place operation */
715         if (sgl != sglout) {
716                 struct qat_alg_buf *bufers;
717
718                 n = sg_nents(sglout);
719                 sz_out = sizeof(struct qat_alg_buf_list) +
720                         ((1 + n) * sizeof(struct qat_alg_buf));
721                 sg_nctr = 0;
722                 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
723                                        dev_to_node(&GET_DEV(inst->accel_dev)));
724                 if (unlikely(!buflout))
725                         goto err_in;
726                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
727                 if (unlikely(dma_mapping_error(dev, bloutp)))
728                         goto err_out;
729                 bufers = buflout->bufers;
730                 for_each_sg(sglout, sg, n, i) {
731                         int y = sg_nctr;
732
733                         if (!sg->length)
734                                 continue;
735
736                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
737                                                         sg->length,
738                                                         DMA_BIDIRECTIONAL);
739                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
740                                 goto err_out;
741                         bufers[y].len = sg->length;
742                         sg_nctr++;
743                 }
744                 buflout->num_bufs = sg_nctr;
745                 buflout->num_mapped_bufs = sg_nctr;
746                 qat_req->buf.blout = buflout;
747                 qat_req->buf.bloutp = bloutp;
748                 qat_req->buf.sz_out = sz_out;
749         } else {
750                 /* Otherwise set the src and dst to the same address */
751                 qat_req->buf.bloutp = qat_req->buf.blp;
752                 qat_req->buf.sz_out = 0;
753         }
754         return 0;
755
756 err_out:
757         n = sg_nents(sglout);
758         for (i = 0; i < n; i++)
759                 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
760                         dma_unmap_single(dev, buflout->bufers[i].addr,
761                                          buflout->bufers[i].len,
762                                          DMA_BIDIRECTIONAL);
763         if (!dma_mapping_error(dev, bloutp))
764                 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
765         kfree(buflout);
766
767 err_in:
768         n = sg_nents(sgl);
769         for (i = 0; i < n; i++)
770                 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
771                         dma_unmap_single(dev, bufl->bufers[i].addr,
772                                          bufl->bufers[i].len,
773                                          DMA_BIDIRECTIONAL);
774
775         if (!dma_mapping_error(dev, blp))
776                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
777         kfree(bufl);
778
779         dev_err(dev, "Failed to map buf for dma\n");
780         return -ENOMEM;
781 }
782
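/*
 * Completion handler for AEAD requests: release the DMA buffer lists and
 * complete the request, reporting -EBADMSG if the firmware status
 * indicates failure (e.g. an authentication mismatch).
 */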
783 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
784                                   struct qat_crypto_request *qat_req)
785 {
786         struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
787         struct qat_crypto_instance *inst = ctx->inst;
788         struct aead_request *areq = qat_req->aead_req;
789         uint8_t stat_filed = qat_resp->comn_resp.comn_status;
790         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
791
792         qat_alg_free_bufl(inst, qat_req);
793         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
794                 res = -EBADMSG;
795         areq->base.complete(&areq->base, res);
796 }
797
798 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
799                                         struct qat_crypto_request *qat_req)
800 {
801         struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
802         struct qat_crypto_instance *inst = ctx->inst;
803         struct ablkcipher_request *areq = qat_req->ablkcipher_req;
804         uint8_t stat_filed = qat_resp->comn_resp.comn_status;
805         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
806
807         qat_alg_free_bufl(inst, qat_req);
808         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
809                 res = -EINVAL;
810         areq->base.complete(&areq->base, res);
811 }
812
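/*
 * Response ring callback: recover the originating request from the opaque
 * data carried in the firmware response and invoke its completion
 * handler.
 */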
813 void qat_alg_callback(void *resp)
814 {
815         struct icp_qat_fw_la_resp *qat_resp = resp;
816         struct qat_crypto_request *qat_req =
817                                 (void *)(__force long)qat_resp->opaque_data;
818
819         qat_req->cb(qat_resp, qat_req);
820 }
821
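/*
 * Submit an AEAD decrypt.  The cipher region excludes the digest at the
 * end of the ciphertext, while the authenticated region covers the
 * associated data plus that cipher region.  Submission is retried a few
 * times if the transport ring is busy; if it stays busy the buffers are
 * released and -EBUSY is returned.
 */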
822 static int qat_alg_aead_dec(struct aead_request *areq)
823 {
824         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
825         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
826         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
827         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
828         struct icp_qat_fw_la_cipher_req_params *cipher_param;
829         struct icp_qat_fw_la_auth_req_params *auth_param;
830         struct icp_qat_fw_la_bulk_req *msg;
831         int digst_size = crypto_aead_authsize(aead_tfm);
832         int ret, ctr = 0;
833
834         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
835         if (unlikely(ret))
836                 return ret;
837
838         msg = &qat_req->req;
839         *msg = ctx->dec_fw_req;
840         qat_req->aead_ctx = ctx;
841         qat_req->aead_req = areq;
842         qat_req->cb = qat_aead_alg_callback;
843         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
844         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
845         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
846         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
847         cipher_param->cipher_length = areq->cryptlen - digst_size;
848         cipher_param->cipher_offset = areq->assoclen;
849         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
850         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
851         auth_param->auth_off = 0;
852         auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
853         do {
854                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
855         } while (ret == -EAGAIN && ctr++ < 10);
856
857         if (ret == -EAGAIN) {
858                 qat_alg_free_bufl(ctx->inst, qat_req);
859                 return -EBUSY;
860         }
861         return -EINPROGRESS;
862 }
863
864 static int qat_alg_aead_enc(struct aead_request *areq)
865 {
866         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
867         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
868         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
869         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
870         struct icp_qat_fw_la_cipher_req_params *cipher_param;
871         struct icp_qat_fw_la_auth_req_params *auth_param;
872         struct icp_qat_fw_la_bulk_req *msg;
873         uint8_t *iv = areq->iv;
874         int ret, ctr = 0;
875
876         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
877         if (unlikely(ret))
878                 return ret;
879
880         msg = &qat_req->req;
881         *msg = ctx->enc_fw_req;
882         qat_req->aead_ctx = ctx;
883         qat_req->aead_req = areq;
884         qat_req->cb = qat_aead_alg_callback;
885         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
886         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
887         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
888         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
889         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
890
891         memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
892         cipher_param->cipher_length = areq->cryptlen;
893         cipher_param->cipher_offset = areq->assoclen;
894
895         auth_param->auth_off = 0;
896         auth_param->auth_len = areq->assoclen + areq->cryptlen;
897
898         do {
899                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
900         } while (ret == -EAGAIN && ctr++ < 10);
901
902         if (ret == -EAGAIN) {
903                 qat_alg_free_bufl(ctx->inst, qat_req);
904                 return -EBUSY;
905         }
906         return -EINPROGRESS;
907 }
908
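/*
 * (Re)key an ablkcipher tfm in the requested mode.  On first use a crypto
 * instance is picked for the current node and DMA-coherent content
 * descriptors are allocated under ctx->lock; on rekey the existing
 * descriptors and request templates are cleared and rebuilt.
 */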
909 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
910                                      const u8 *key, unsigned int keylen,
911                                      int mode)
912 {
913         struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
914         struct device *dev;
915
916         spin_lock(&ctx->lock);
917         if (ctx->enc_cd) {
918                 /* rekeying */
919                 dev = &GET_DEV(ctx->inst->accel_dev);
920                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
921                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
922                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
923                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
924         } else {
925                 /* new key */
926                 int node = get_current_node();
927                 struct qat_crypto_instance *inst =
928                                 qat_crypto_get_instance_node(node);
929                 if (!inst) {
930                         spin_unlock(&ctx->lock);
931                         return -EINVAL;
932                 }
933
934                 dev = &GET_DEV(inst->accel_dev);
935                 ctx->inst = inst;
936                 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
937                                                  &ctx->enc_cd_paddr,
938                                                  GFP_ATOMIC);
939                 if (!ctx->enc_cd) {
940                         spin_unlock(&ctx->lock);
941                         return -ENOMEM;
942                 }
943                 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
944                                                  &ctx->dec_cd_paddr,
945                                                  GFP_ATOMIC);
946                 if (!ctx->dec_cd) {
947                         spin_unlock(&ctx->lock);
948                         goto out_free_enc;
949                 }
950         }
951         spin_unlock(&ctx->lock);
952         if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
953                 goto out_free_all;
954
955         return 0;
956
957 out_free_all:
958         memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
959         dma_free_coherent(dev, sizeof(*ctx->dec_cd),
960                           ctx->dec_cd, ctx->dec_cd_paddr);
961         ctx->dec_cd = NULL;
962 out_free_enc:
963         memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
964         dma_free_coherent(dev, sizeof(*ctx->enc_cd),
965                           ctx->enc_cd, ctx->enc_cd_paddr);
966         ctx->enc_cd = NULL;
967         return -ENOMEM;
968 }
969
970 static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
971                                          const u8 *key, unsigned int keylen)
972 {
973         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
974                                          ICP_QAT_HW_CIPHER_CBC_MODE);
975 }
976
977 static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
978                                          const u8 *key, unsigned int keylen)
979 {
980         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
981                                          ICP_QAT_HW_CIPHER_CTR_MODE);
982 }
983
984 static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
985                                          const u8 *key, unsigned int keylen)
986 {
987         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
988                                          ICP_QAT_HW_CIPHER_XTS_MODE);
989 }
990
991 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
992 {
993         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
994         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
995         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
996         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
997         struct icp_qat_fw_la_cipher_req_params *cipher_param;
998         struct icp_qat_fw_la_bulk_req *msg;
999         int ret, ctr = 0;
1000
1001         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1002         if (unlikely(ret))
1003                 return ret;
1004
1005         msg = &qat_req->req;
1006         *msg = ctx->enc_fw_req;
1007         qat_req->ablkcipher_ctx = ctx;
1008         qat_req->ablkcipher_req = req;
1009         qat_req->cb = qat_ablkcipher_alg_callback;
1010         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1011         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1012         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1013         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1014         cipher_param->cipher_length = req->nbytes;
1015         cipher_param->cipher_offset = 0;
1016         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1017         do {
1018                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1019         } while (ret == -EAGAIN && ctr++ < 10);
1020
1021         if (ret == -EAGAIN) {
1022                 qat_alg_free_bufl(ctx->inst, qat_req);
1023                 return -EBUSY;
1024         }
1025         return -EINPROGRESS;
1026 }
1027
1028 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1029 {
1030         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1031         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1032         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1033         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1034         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1035         struct icp_qat_fw_la_bulk_req *msg;
1036         int ret, ctr = 0;
1037
1038         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1039         if (unlikely(ret))
1040                 return ret;
1041
1042         msg = &qat_req->req;
1043         *msg = ctx->dec_fw_req;
1044         qat_req->ablkcipher_ctx = ctx;
1045         qat_req->ablkcipher_req = req;
1046         qat_req->cb = qat_ablkcipher_alg_callback;
1047         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1048         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1049         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1050         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1051         cipher_param->cipher_length = req->nbytes;
1052         cipher_param->cipher_offset = 0;
1053         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1054         do {
1055                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1056         } while (ret == -EAGAIN && ctr++ < 10);
1057
1058         if (ret == -EAGAIN) {
1059                 qat_alg_free_bufl(ctx->inst, qat_req);
1060                 return -EBUSY;
1061         }
1062         return -EINPROGRESS;
1063 }
1064
1065 static int qat_alg_aead_init(struct crypto_aead *tfm,
1066                              enum icp_qat_hw_auth_algo hash,
1067                              const char *hash_name)
1068 {
1069         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1070
1071         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1072         if (IS_ERR(ctx->hash_tfm))
1073                 return PTR_ERR(ctx->hash_tfm);
1074         ctx->qat_hash_alg = hash;
1075         crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1076         return 0;
1077 }
1078
1079 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1080 {
1081         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1082 }
1083
1084 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1085 {
1086         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1087 }
1088
1089 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1090 {
1091         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1092 }
1093
1094 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1095 {
1096         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1097         struct qat_crypto_instance *inst = ctx->inst;
1098         struct device *dev;
1099
1100         crypto_free_shash(ctx->hash_tfm);
1101
1102         if (!inst)
1103                 return;
1104
1105         dev = &GET_DEV(inst->accel_dev);
1106         if (ctx->enc_cd) {
1107                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1108                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1109                                   ctx->enc_cd, ctx->enc_cd_paddr);
1110         }
1111         if (ctx->dec_cd) {
1112                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1113                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1114                                   ctx->dec_cd, ctx->dec_cd_paddr);
1115         }
1116         qat_crypto_put_instance(inst);
1117 }
1118
1119 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1120 {
1121         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1122
1123         spin_lock_init(&ctx->lock);
1124         tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
1125         ctx->tfm = tfm;
1126         return 0;
1127 }
1128
1129 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1130 {
1131         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1132         struct qat_crypto_instance *inst = ctx->inst;
1133         struct device *dev;
1134
1135         if (!inst)
1136                 return;
1137
1138         dev = &GET_DEV(inst->accel_dev);
1139         if (ctx->enc_cd) {
1140                 memset(ctx->enc_cd, 0,
1141                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1142                 dma_free_coherent(dev,
1143                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1144                                   ctx->enc_cd, ctx->enc_cd_paddr);
1145         }
1146         if (ctx->dec_cd) {
1147                 memset(ctx->dec_cd, 0,
1148                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1149                 dma_free_coherent(dev,
1150                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1151                                   ctx->dec_cd, ctx->dec_cd_paddr);
1152         }
1153         qat_crypto_put_instance(inst);
1154 }
1155
1156
1157 static struct aead_alg qat_aeads[] = { {
1158         .base = {
1159                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1160                 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1161                 .cra_priority = 4001,
1162                 .cra_flags = CRYPTO_ALG_ASYNC,
1163                 .cra_blocksize = AES_BLOCK_SIZE,
1164                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1165                 .cra_module = THIS_MODULE,
1166         },
1167         .init = qat_alg_aead_sha1_init,
1168         .exit = qat_alg_aead_exit,
1169         .setkey = qat_alg_aead_setkey,
1170         .decrypt = qat_alg_aead_dec,
1171         .encrypt = qat_alg_aead_enc,
1172         .ivsize = AES_BLOCK_SIZE,
1173         .maxauthsize = SHA1_DIGEST_SIZE,
1174 }, {
1175         .base = {
1176                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1177                 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1178                 .cra_priority = 4001,
1179                 .cra_flags = CRYPTO_ALG_ASYNC,
1180                 .cra_blocksize = AES_BLOCK_SIZE,
1181                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1182                 .cra_module = THIS_MODULE,
1183         },
1184         .init = qat_alg_aead_sha256_init,
1185         .exit = qat_alg_aead_exit,
1186         .setkey = qat_alg_aead_setkey,
1187         .decrypt = qat_alg_aead_dec,
1188         .encrypt = qat_alg_aead_enc,
1189         .ivsize = AES_BLOCK_SIZE,
1190         .maxauthsize = SHA256_DIGEST_SIZE,
1191 }, {
1192         .base = {
1193                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1194                 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1195                 .cra_priority = 4001,
1196                 .cra_flags = CRYPTO_ALG_ASYNC,
1197                 .cra_blocksize = AES_BLOCK_SIZE,
1198                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1199                 .cra_module = THIS_MODULE,
1200         },
1201         .init = qat_alg_aead_sha512_init,
1202         .exit = qat_alg_aead_exit,
1203         .setkey = qat_alg_aead_setkey,
1204         .decrypt = qat_alg_aead_dec,
1205         .encrypt = qat_alg_aead_enc,
1206         .ivsize = AES_BLOCK_SIZE,
1207         .maxauthsize = SHA512_DIGEST_SIZE,
1208 } };
1209
1210 static struct crypto_alg qat_algs[] = { {
1211         .cra_name = "cbc(aes)",
1212         .cra_driver_name = "qat_aes_cbc",
1213         .cra_priority = 4001,
1214         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1215         .cra_blocksize = AES_BLOCK_SIZE,
1216         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1217         .cra_alignmask = 0,
1218         .cra_type = &crypto_ablkcipher_type,
1219         .cra_module = THIS_MODULE,
1220         .cra_init = qat_alg_ablkcipher_init,
1221         .cra_exit = qat_alg_ablkcipher_exit,
1222         .cra_u = {
1223                 .ablkcipher = {
1224                         .setkey = qat_alg_ablkcipher_cbc_setkey,
1225                         .decrypt = qat_alg_ablkcipher_decrypt,
1226                         .encrypt = qat_alg_ablkcipher_encrypt,
1227                         .min_keysize = AES_MIN_KEY_SIZE,
1228                         .max_keysize = AES_MAX_KEY_SIZE,
1229                         .ivsize = AES_BLOCK_SIZE,
1230                 },
1231         },
1232 }, {
1233         .cra_name = "ctr(aes)",
1234         .cra_driver_name = "qat_aes_ctr",
1235         .cra_priority = 4001,
1236         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1237         .cra_blocksize = AES_BLOCK_SIZE,
1238         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1239         .cra_alignmask = 0,
1240         .cra_type = &crypto_ablkcipher_type,
1241         .cra_module = THIS_MODULE,
1242         .cra_init = qat_alg_ablkcipher_init,
1243         .cra_exit = qat_alg_ablkcipher_exit,
1244         .cra_u = {
1245                 .ablkcipher = {
1246                         .setkey = qat_alg_ablkcipher_ctr_setkey,
1247                         .decrypt = qat_alg_ablkcipher_decrypt,
1248                         .encrypt = qat_alg_ablkcipher_encrypt,
1249                         .min_keysize = AES_MIN_KEY_SIZE,
1250                         .max_keysize = AES_MAX_KEY_SIZE,
1251                         .ivsize = AES_BLOCK_SIZE,
1252                 },
1253         },
1254 }, {
1255         .cra_name = "xts(aes)",
1256         .cra_driver_name = "qat_aes_xts",
1257         .cra_priority = 4001,
1258         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1259         .cra_blocksize = AES_BLOCK_SIZE,
1260         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1261         .cra_alignmask = 0,
1262         .cra_type = &crypto_ablkcipher_type,
1263         .cra_module = THIS_MODULE,
1264         .cra_init = qat_alg_ablkcipher_init,
1265         .cra_exit = qat_alg_ablkcipher_exit,
1266         .cra_u = {
1267                 .ablkcipher = {
1268                         .setkey = qat_alg_ablkcipher_xts_setkey,
1269                         .decrypt = qat_alg_ablkcipher_decrypt,
1270                         .encrypt = qat_alg_ablkcipher_encrypt,
1271                         .min_keysize = 2 * AES_MIN_KEY_SIZE,
1272                         .max_keysize = 2 * AES_MAX_KEY_SIZE,
1273                         .ivsize = AES_BLOCK_SIZE,
1274                 },
1275         },
1276 } };
1277
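/*
 * Register the algorithms with the crypto API when the first QAT device
 * comes up; additional devices only increment the refcount.
 * qat_algs_unregister() drops them again when the last device goes away.
 */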
1278 int qat_algs_register(void)
1279 {
1280         int ret = 0, i;
1281
1282         mutex_lock(&algs_lock);
1283         if (++active_devs != 1)
1284                 goto unlock;
1285
1286         for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1287                 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1288
1289         ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1290         if (ret)
1291                 goto unlock;
1292
1293         for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1294                 qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
1295
1296         ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1297         if (ret)
1298                 goto unreg_algs;
1299
1300 unlock:
1301         mutex_unlock(&algs_lock);
1302         return ret;
1303
1304 unreg_algs:
1305         crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1306         goto unlock;
1307 }
1308
1309 void qat_algs_unregister(void)
1310 {
1311         mutex_lock(&algs_lock);
1312         if (--active_devs != 0)
1313                 goto unlock;
1314
1315         crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1316         crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1317
1318 unlock:
1319         mutex_unlock(&algs_lock);
1320 }