/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
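/*
 * Build the hardware cipher config word for AES in CBC mode: the
 * encrypt template uses the key as supplied, while the decrypt
 * template asks the hardware to convert the key schedule for the
 * decrypt direction.
 */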
#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;
struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
};
static int get_current_node(void)
	return cpu_data(current_thread_info()->cpu).phys_proc_id;

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
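/*
 * Precompute the HMAC inner and outer partial digests in software:
 * hash the ipad/opad-processed key with the session's shash, export
 * the intermediate state and store it big-endian in the hardware auth
 * setup block (inner state first, outer state at the rounded-up
 * state-size offset).
 */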
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
	struct qat_auth_state auth_state;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
	} desc;
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;

	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
	desc.shash.tfm = ctx->hash_tfm;
	desc.shash.flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(&desc.shash, auth_key,
		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;

	if (crypto_shash_init(&desc.shash))
	if (crypto_shash_update(&desc.shash, ipad, block_size))
	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));

	if (crypto_shash_init(&desc.shash))
	if (crypto_shash_update(&desc.shash, opad, block_size))
	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
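/*
 * Fill the request header fields shared by the encrypt and decrypt
 * templates: lookaside service, SGL pointers, digest appended in the
 * buffer, no partial packets and no state updates.
 */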
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
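/*
 * Build the encrypt content descriptor and firmware request template:
 * cipher block followed by auth block (CIPHER_HASH), slice chain
 * cipher -> auth -> DRAM write, with the auth result returned rather
 * than compared.
 */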
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
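/*
 * Build the decrypt content descriptor and firmware request template:
 * auth block followed by cipher block (HASH_CIPHER), slice chain
 * auth -> cipher -> DRAM write, with the hardware comparing the auth
 * result instead of returning it.
 */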
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
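/*
 * Derive a session from the authenc() key blob: generate the IV salt,
 * split the blob into cipher and auth keys, select the AES key size
 * and then build both the encrypt and decrypt session templates.
 */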
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
	struct crypto_authenc_keys keys;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))

	if (crypto_authenc_extractkeys(&keys, key, keylen))

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;

	if (qat_alg_init_enc_session(ctx, alg, &keys))

	if (qat_alg_init_dec_session(ctx, alg, &keys))

	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
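/*
 * setkey: reuse the already allocated DMA-coherent content descriptors
 * when rekeying, otherwise bind the tfm to a crypto instance on the
 * current node and allocate them, then rebuild both session templates
 * from the new key.
 */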
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);

	spin_lock(&ctx->lock);
	dev = &GET_DEV(ctx->inst->accel_dev);
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	memset(&ctx->enc_fw_req_tmpl, 0,
	       sizeof(struct icp_qat_fw_la_bulk_req));
	memset(&ctx->dec_fw_req_tmpl, 0,
	       sizeof(struct icp_qat_fw_la_bulk_req));
	int node = get_current_node();
	struct qat_crypto_instance *inst =
		qat_crypto_get_instance_node(node);

		spin_unlock(&ctx->lock);
	dev = &GET_DEV(inst->accel_dev);
	ctx->enc_cd = dma_zalloc_coherent(dev,
					  sizeof(struct qat_alg_cd),
		spin_unlock(&ctx->lock);
	ctx->dec_cd = dma_zalloc_coherent(dev,
					  sizeof(struct qat_alg_cd),
		spin_unlock(&ctx->lock);
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))

	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
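/*
 * Undo qat_alg_sgl_to_bufl(): unmap every mapped buffer in the source
 * list and, for out of place requests, the data buffers of the
 * destination list, then unmap the flat lists themselves.
 */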
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	/* If out of place operation dma unmap only data */
	int bufless = bufs - blout->num_mapped_bufs;

	for (i = bufless; i < bufs; i++) {
		dma_unmap_single(dev, blout->bufers[i].addr,
				 blout->bufers[i].len,
	dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
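/*
 * Flatten the assoc, IV and data scatterlists into a DMA mapped
 * qat_alg_buf_list the firmware can walk.  For out of place requests a
 * second list is built for the destination that reuses the assoc and
 * IV mappings and maps only the output data; otherwise source and
 * destination share the same list.
 */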
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       struct qat_crypto_request *qat_req)
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))

	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))

	for_each_sg(sgl, sg, n, i) {
		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))

	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	struct qat_alg_buf *bufers;

	buflout = kmalloc_node(sz, GFP_ATOMIC,
			       inst->accel_dev->numa_node);
	if (unlikely(!buflout))
	bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, bloutp)))
	bufers = buflout->bufers;
	/* For out of place operation dma map only data and
	 * reuse assoc mapping and iv */
	for (i = 0; i < bufs; i++) {
		bufers[i].len = bufl->bufers[i].len;
		bufers[i].addr = bufl->bufers[i].addr;
	for_each_sg(sglout, sg, n, i) {
		bufers[y].addr = dma_map_single(dev, sg_virt(sg),
		buflout->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
	buflout->num_bufs = n + bufs;
	buflout->num_mapped_bufs = n;
	qat_req->buf.blout = buflout;
	qat_req->buf.bloutp = bloutp;
	/* Otherwise set the src and dst to the same address */
	qat_req->buf.bloutp = qat_req->buf.blp;

	dev_err(dev, "Failed to map buf for dma\n");
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
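/*
 * Response ring callback: recover the request from the opaque data,
 * release its DMA buffer lists and complete the aead request, passing
 * an error down when the firmware reports a non-OK status.
 */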
void qat_alg_callback(void *resp)
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
		(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
	areq->base.complete(&areq->base, res);
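/*
 * Decrypt: the cipher length excludes the trailing auth tag, while the
 * authenticated region spans the assoc data, the IV and the remaining
 * ciphertext.
 */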
static int qat_alg_dec(struct aead_request *areq)
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);

	*msg = ctx->dec_fw_req_tmpl;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
			       cipher_param->cipher_length + AES_BLOCK_SIZE;

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
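/*
 * Common encrypt path for the plain and IV-generating entry points.
 * The final argument selects whether the (generated) IV is encrypted
 * in line with the payload or only carried in the request IV field.
 */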
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);

	*msg = ctx->enc_fw_req_tmpl;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
	cipher_param->cipher_offset = areq->assoclen;

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);

static int qat_alg_enc(struct aead_request *areq)
	return qat_alg_enc_internal(areq, areq->iv, 0);

static int qat_alg_genivenc(struct aead_givcrypt_request *req)
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
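/*
 * Common tfm init: allocate the software shash used for the HMAC
 * precomputes, record the hardware hash algorithm and size the
 * per-request context for the firmware request bookkeeping.
 */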
static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, '\0', sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");

static void qat_alg_exit(struct crypto_tfm *tfm)
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	dev = &GET_DEV(inst->accel_dev);
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
		.setkey = qat_alg_setkey,
		.decrypt = qat_alg_dec,
		.encrypt = qat_alg_enc,
		.givencrypt = qat_alg_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,

	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
		.setkey = qat_alg_setkey,
		.decrypt = qat_alg_dec,
		.encrypt = qat_alg_enc,
		.givencrypt = qat_alg_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,

	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
		.setkey = qat_alg_setkey,
		.decrypt = qat_alg_dec,
		.encrypt = qat_alg_enc,
		.givencrypt = qat_alg_genivenc,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
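/*
 * Once registered these transforms are reached through the regular
 * kernel crypto API; a minimal usage sketch (error handling omitted,
 * key/assoc/src/dst setup assumed):
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	crypto_aead_setkey(tfm, key, keylen);
 *
 * Registration below is reference counted via active_dev so that the
 * algs are registered once for the first accel device and removed
 * when the last one goes away.
 */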
int qat_algs_register(void)
	if (atomic_add_return(1, &active_dev) == 1) {
		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));

int qat_algs_unregister(void)
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

int qat_algs_init(void)
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();

void qat_algs_exit(void)
	crypto_put_default_rng();