/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
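
/*
 * Note: the opad_tmp_keys buffer doubles as storage for the HMAC opad
 * key and, in the XCBC-MAC case, for the three derived AES keys K1..K3;
 * hence the MAX(block size, 3 * AES_BLOCK_SIZE) sizing above.
 */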
struct ssi_hash_handle {
	ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
	struct completion init_comp;
};

static const u32 digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
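/*
 * Initial value for the hardware "current length" counter used when
 * finalizing an HMAC: one hash block (0x40 = 64 bytes; 128 bytes for the
 * SHA-384/512 variant below) is pre-counted for the ipad/opad block that
 * has already been absorbed.
 */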
static const u32 md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
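/*
 * MD5's initialization words A..D are numerically identical to SHA-1's
 * H0..H3, which is why the SHA1_H* constants are reused here (listed
 * high-to-low, as with the other larval digests).
 */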
static const u32 sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
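
/*
 * All larval digest words above are listed high-to-low (H7..H0); this is
 * the order in which the hash engine expects to find them in SRAM.
 */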
static void ssi_hash_create_xcbc_setup(
	struct ahash_request *areq,
	struct cc_hw_desc desc[],
	unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size);

struct ssi_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct ssi_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
	struct ssi_drvdata *drvdata;
	/* holds the original digest: the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct completion setkey_comp;
	bool is_hmac;
};
static void ssi_hash_create_data_desc(
	struct ahash_req_ctx *areq_ctx,
	struct ssi_hash_ctx *ctx,
	unsigned int flow_mode, struct cc_hw_desc desc[],
	bool is_not_last_data,
	unsigned int *seq_size);
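
/*
 * MD5, SHA-384 and SHA-512 results leave the engine with their words
 * byte-swapped relative to the expected output, so those modes get an
 * explicit byte swap; all other modes only need the little-endian
 * result configuration.
 */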
static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (unlikely((mode == DRV_HASH_MD5) ||
		     (mode == DRV_HASH_SHA384) ||
		     (mode == DRV_HASH_SHA512))) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

static int ssi_hash_map_result(struct device *dev,
			       struct ahash_req_ctx *state,
			       unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, (void *)state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}
static int ssi_hash_map_request(struct device *dev,
				struct ahash_req_ctx *state,
				struct ssi_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc;
	int rc = -ENOMEM;

	state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff0)
		goto fail0;

	state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff1)
		goto fail_buff0;

	state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->digest_result_buff)
		goto fail_buff1;

	state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
	if (!state->digest_buff)
		goto fail_digest_result_buff;

	dev_dbg(dev, "Allocated digest-buffer in context state->digest_buff=@%p\n",
		state->digest_buff);
	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
		if (!state->digest_bytes_len)
			goto fail1;

		dev_dbg(dev, "Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n",
			state->digest_bytes_len);
	} else {
		state->digest_bytes_len = NULL;
	}

	state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
	if (!state->opad_digest_buff)
		goto fail2;

	dev_dbg(dev, "Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n",
		state->opad_digest_buff);

	state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		goto fail3;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (is_hmac) {
		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
			memset(state->digest_buff, 0, ctx->inter_digestsize);
		} else {
			memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
			if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
				memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
			else
				memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#else
			memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
		}
		dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /* hash */
		/* Copy the initial digests if hash flow. The SRAM contains the
		 * initial digests in the expected order for all SHA*
		 */
		hw_desc_init(&desc);
		set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
		set_dout_dlli(&desc, state->digest_buff_dma_addr,
			      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc, BYPASS);

		rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
		if (unlikely(rc != 0)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto fail4;
		}
	}

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_LEN_SIZE, state->digest_bytes_len);
			goto fail4;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	} else {
		state->digest_bytes_len_dma_addr = 0;
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto fail5;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	} else {
		state->opad_digest_dma_addr = 0;
	}
	state->buff0_cnt = 0;
	state->buff1_cnt = 0;
	state->buff_index = 0;
	state->mlli_params.curr_pool = NULL;

	return 0;

fail5:
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
fail4:
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}
fail3:
	kfree(state->opad_digest_buff);
fail2:
	kfree(state->digest_bytes_len);
fail1:
	kfree(state->digest_buff);
fail_digest_result_buff:
	kfree(state->digest_result_buff);
	state->digest_result_buff = NULL;
fail_buff1:
	kfree(state->buff1);
	state->buff1 = NULL;
fail_buff0:
	kfree(state->buff0);
	state->buff0 = NULL;
fail0:
	return rc;
}
static void ssi_hash_unmap_request(struct device *dev,
				   struct ahash_req_ctx *state,
				   struct ssi_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr != 0) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}

	kfree(state->opad_digest_buff);
	kfree(state->digest_bytes_len);
	kfree(state->digest_buff);
	kfree(state->digest_result_buff);
	kfree(state->buff1);
	kfree(state->buff0);
}
static void ssi_hash_unmap_result(struct device *dev,
				  struct ahash_req_ctx *state,
				  unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr != 0) {
		dma_unmap_single(dev,
				 state->digest_result_dma_addr,
				 digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result,
		       state->digest_result_buff,
		       digestsize);
	}
	state->digest_result_dma_addr = 0;
}
static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}
static int ssi_hash_digest(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int digestsize,
			   struct scatterlist *src,
			   unsigned int nbytes, u8 *result,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_digest_complete;
		ssi_req.user_arg = (void *)async_req;
	}

	/* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
			     NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		if (likely(nbytes != 0))
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      HASH_LEN_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		/* store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash opad xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
		ssi_hash_unmap_result(dev, state, digestsize, result);
		ssi_hash_unmap_request(dev, state, ctx);
	}
	return rc;
}
static int ssi_hash_update(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int block_size,
			   struct scatterlist *src,
			   unsigned int nbytes,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	u32 idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
	if (unlikely(rc)) {
		if (rc == 1) {
			dev_dbg(dev, "data size doesn't require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_update_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
	}
	return rc;
}
static int ssi_hash_finup(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}
static int ssi_hash_final(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* "DO-PAD" must be enabled only when writing current length to HW */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	state->xcbc_count = 0;

	ssi_hash_map_request(dev, state, ctx);

	return 0;
}
static int ssi_hash_setkey(void *hash,
			   const u8 *key,
			   unsigned int keylen,
			   bool synchronize)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = NULL;
	struct device *dev = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_addr;

	ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d\n", keylen);

	blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
	digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));

	larval_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);

	/* The keylen value distinguishes HASH in case keylen is ZERO bytes;
	 * any NON-ZERO value selects the HMAC flow.
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;

	if (keylen != 0) {
		ctx->key_params.key_dma_addr = dma_map_single(
						dev, (void *)key,
						keylen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       ctx->key_params.key_dma_addr))) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
						   digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
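	/*
	 * Two passes below: iteration 0 XORs the (padded) key with the ipad
	 * constant and writes the resulting inner digest to digest_buff;
	 * iteration 1 repeats this with the opad constant into
	 * opad_tmp_keys (see hmac_pad_const[] above).
	 */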
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

out:
	if (rc)
		crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}
	return rc;
}
static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
			   const u8 *key, unsigned int keylen)
{
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key_dma_addr = dma_map_single(
					dev, (void *)key,
					keylen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;
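
	/*
	 * XCBC-MAC key derivation (RFC 3566): K1, K2 and K3 are the AES
	 * encryptions of the constant blocks 0x01.., 0x02.. and 0x03..
	 * under the user-supplied key loaded above.
	 */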
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

	if (rc != 0)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	return rc;
}
static int ssi_cmac_setkey(struct crypto_ahash *ahash,
			   const u8 *key, unsigned int keylen)
{
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
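	/*
	 * A 192-bit key is zero-padded up to the maximum AES key size;
	 * this matches ssi_mac_final(), which uses AES_MAX_KEY_SIZE as the
	 * key_size whenever keylen == 24.
	 */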
	if (keylen == 24)
		memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}
static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr != 0) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;

	return 0;

fail:
	ssi_hash_free_ctx(ctx);
	return -ENOMEM;
}
static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct ssi_hash_alg *ssi_alg =
		container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = ssi_alg->hash_mode;
	ctx->hw_mode = ssi_alg->hw_mode;
	ctx->inter_digestsize = ssi_alg->inter_digestsize;
	ctx->drvdata = ssi_alg->drvdata;

	return ssi_hash_alloc_ctx(ctx);
}

static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "ssi_hash_cra_exit");
	ssi_hash_free_ctx(ctx);
}
static int ssi_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int rc;
	u32 idx = 0;

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
	if (unlikely(rc)) {
		if (rc == 1) {
			dev_dbg(dev, "data size doesn't require HW update %x\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	else
		ssi_hash_create_cmac_setup(req, desc, &idx);

	ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_update_complete;
	ssi_req.user_arg = (void *)req;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
	}
	return rc;
}
static int ssi_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);

	u32 rem_cnt = state->buff_index ? state->buff1_cnt :
			state->buff0_cnt;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_complete;
	ssi_req.user_arg = (void *)req;
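
	/*
	 * If the data consumed so far ended exactly on a block boundary
	 * (rem_cnt == 0), the last block has already been MACed; the ECB
	 * decryption below recovers the previous block_state XOR M[n] so
	 * the MAC can be finalized over a correctly handled last block.
	 */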
	if (state->xcbc_count && (rem_cnt == 0)) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr +
			      XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous block_state-XOR-M[n] */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	else
		ssi_hash_create_cmac_setup(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
	}
	return rc;
}
static int ssi_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to ssi_mac_final\n");
		return ssi_mac_final(req);
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_complete;
	ssi_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		ssi_hash_create_cmac_setup(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
	}
	return rc;
}
static int ssi_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	u32 key_len;
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);

	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_digest_complete;
	ssi_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		ssi_hash_create_cmac_setup(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
		ssi_hash_unmap_request(dev, state, ctx);
	}
	return rc;
}
/* ahash wrapper functions */
static int ssi_ahash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);

	return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
}

static int ssi_ahash_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	return ssi_hash_init(state, ctx);
}
static int ssi_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
	u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
				state->buff0_cnt;
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	if (state->digest_bytes_len_dma_addr) {
		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
	} else {
		/* Poison the unused exported digest len field. */
		memset(out, 0x5F, HASH_LEN_SIZE);
	}
	out += HASH_LEN_SIZE;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	/* No sync for device is needed since we did not change the data,
	 * we only copied it.
	 */

	return 0;
}
static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;
	int rc = 0;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC) {
		rc = -EINVAL;
		goto out;
	}
	in += sizeof(u32);

	/* call init() to allocate bufs if the user hasn't */
	if (!state->digest_buff) {
		rc = ssi_hash_init(state, ctx);
		if (rc)
			goto out;
	}

	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	if (state->digest_bytes_len_dma_addr) {
		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
	}
	in += HASH_LEN_SIZE;

	dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
				   ctx->inter_digestsize, DMA_BIDIRECTIONAL);

	if (state->digest_bytes_len_dma_addr)
		dma_sync_single_for_device(dev,
					   state->digest_bytes_len_dma_addr,
					   HASH_LEN_SIZE, DMA_BIDIRECTIONAL);

	state->buff_index = 0;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}
	in += sizeof(u32);

	state->buff0_cnt = tmp;
	memcpy(state->buff0, in, state->buff0_cnt);

out:
	return rc;
}

static int ssi_ahash_setkey(struct crypto_ahash *ahash,
			    const u8 *key, unsigned int keylen)
{
	return ssi_hash_setkey((void *)ahash, key, keylen, false);
}
struct ssi_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct ssi_drvdata *drvdata;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
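
/*
 * The exported state (see ssi_ahash_export()) is laid out as: a
 * CC_EXPORT_MAGIC word, the intermediate digest, the hash length
 * counter, the buffered-byte count and the buffered data itself,
 * which is exactly what CC_STATE_SIZE() accounts for.
 */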
/* hash descriptors */
static struct ssi_hash_template driver_hash[] = {
	/* asynchronous hash templates */
	{
		.name = "sha1",
		.driver_name = "sha1-dx",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-dx",
		.blocksize = SHA1_BLOCK_SIZE,
		.synchronize = false,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-dx",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-dx",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-dx",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-dx",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		/* SHA-224 runs on the SHA-256 engine, so the intermediate
		 * digest is a full SHA-256 state.
		 */
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
	},
#if (DX_DEV_SHA_MAX > 256)
	{
		.name = "sha384",
		.driver_name = "sha384-dx",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-dx",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-dx",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-dx",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
	},
#endif
	{
		.name = "md5",
		.driver_name = "md5-dx",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-dx",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
	},
	/* MAC-only templates: no plain-hash variant is registered */
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_mac_update,
			.final = ssi_mac_final,
			.finup = ssi_mac_finup,
			.digest = ssi_mac_digest,
			.setkey = ssi_xcbc_setkey,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_mac_update,
			.final = ssi_mac_final,
			.finup = ssi_mac_finup,
			.digest = ssi_mac_digest,
			.setkey = ssi_cmac_setkey,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
	},
};
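/*
 * Each template above is registered by ssi_hash_alloc() twice: once as
 * the keyed mac_name/mac_driver_name variant and once as the plain hash.
 * The XCBC and CMAC entries are MAC-only (their hash_mode is
 * DRV_HASH_NULL), so only the keyed variant is registered for them.
 */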
static struct ssi_hash_alg *
ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
		    bool keyed)
{
	struct ssi_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		/* plain hash: drop the setkey hook inherited from the
		 * template
		 */
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
	alg->cra_priority = SSI_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = ssi_hash_cra_exit;

	alg->cra_init = ssi_ahash_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
			 CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->cra_type = &crypto_ahash_type;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}
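/*
 * SRAM layout produced below (offsets from digest_len_sram_addr):
 *
 *	digest_len_init			initial digest-length words
 *	digest_len_sha512_init		(only when DX_DEV_SHA_MAX > 256)
 *	md5_init			<- larval_digest_sram_addr
 *	sha1_init
 *	sha224_init
 *	sha256_init
 *	sha384_init, sha512_init	(word-swapped, only when
 *					 DX_DEV_SHA_MAX > 256)
 *
 * ssi_ahash_get_larval_digest_sram_addr() relies on exactly this order.
 */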
int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
	ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
	unsigned int larval_seq_len = 0;
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
#if (DX_DEV_SHA_MAX > 256)
	int i;
#endif

	/* Copy-to-sram digest-len */
	ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
				     ARRAY_SIZE(digest_len_init),
				     larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_init);
	larval_seq_len = 0;

#if (DX_DEV_SHA_MAX > 256)
	/* Copy-to-sram digest-len for sha384/512 */
	ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
				     ARRAY_SIZE(digest_len_sha512_init),
				     larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_sha512_init);
	larval_seq_len = 0;
#endif

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
				     ARRAY_SIZE(md5_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(md5_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
				     ARRAY_SIZE(sha1_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha1_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
				     ARRAY_SIZE(sha224_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha224_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
				     ARRAY_SIZE(sha256_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha256_init);
	larval_seq_len = 0;

#if (DX_DEV_SHA_MAX > 256)
	/* Each 64-bit larval constant must have its two 32-bit halves
	 * swapped before copying to SRAM; on a little-endian host,
	 * index [1] holds the most-significant word.
	 */
	for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
		const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
		const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
	}
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;
	}
	larval_seq_len = 0;

	for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
		const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
		const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
	}
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;
	}
#endif

init_digest_const_err:
	return rc;
}
int ssi_hash_alloc(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_handle *hash_handle;
	ssi_sram_addr_t sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(digest_len_init) +
#if (DX_DEV_SHA_MAX > 256)
			sizeof(digest_len_sha512_init) +
			sizeof(sha384_init) +
			sizeof(sha512_init) +
#endif
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init);

	sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* must be set before the alg registration as it is used there */
	rc = ssi_hash_init_sram_digest_consts(drvdata);
	if (unlikely(rc != 0)) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct ssi_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* register hmac version */
		t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry,
				      &hash_handle->hash_list);
		}

		if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
		    (hw_mode == DRV_CIPHER_CMAC))
			continue;

		/* register hash version */
		t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
	}

	return 0;

fail:
	kfree(drvdata->hash_handle);
	drvdata->hash_handle = NULL;
	return rc;
}
int ssi_hash_free(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_alg *t_hash_alg, *hash_n;
	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;

	if (hash_handle) {
		list_for_each_entry_safe(t_hash_alg, hash_n,
					 &hash_handle->hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}

		kfree(hash_handle);
		drvdata->hash_handle = NULL;
	}
	return 0;
}
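/*
 * Build the setup descriptors for an XCBC-MAC pass: load the three
 * subkeys (K1 into the key register, K2 and K3 into state registers)
 * that ssi_xcbc_setkey() derived into opad_tmp_keys_buff, then restore
 * the running MAC state from the request context.
 */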
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	*seq_size = idx;
}
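/*
 * CMAC setup is simpler than XCBC: the engine derives the CMAC subkeys
 * internally, so only the user AES key stored by ssi_cmac_setkey() and
 * the running MAC state need to be loaded.
 */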
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key: 24-byte (AES-192) keys are stored zero-padded
	 * to AES_MAX_KEY_SIZE in opad_tmp_keys_buff.
	 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	*seq_size = idx;
}
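/*
 * Build the data descriptors for one pass over the input: a single DLLI
 * descriptor when the mapped data is contiguous, or, for scattered data,
 * a BYPASS descriptor that stages the MLLI table in SRAM followed by an
 * MLLI descriptor that streams the data through the requested flow.
 */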
static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
				      struct ssi_hash_ctx *ctx,
				      unsigned int flow_mode,
				      struct cc_hw_desc desc[],
				      bool is_not_last_data,
				      unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
			dev_dbg(dev, "NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass: stage the MLLI table in SRAM */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;

		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}
/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 *
 * \return ssi_sram_addr_t The address of the initial digest in SRAM
 */
ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
{
	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);

	/* The offsets below follow the copy order used by
	 * ssi_hash_init_sram_digest_consts().
	 */
	switch (mode) {
	case DRV_HASH_NULL:
		break; /* Ignored */
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init));
#if (DX_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init));
	case DRV_HASH_SHA512:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init) +
			sizeof(sha384_init));
#endif
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* A valid (if wrong) address, returned to avoid a kernel crash. */
	return hash_handle->larval_digest_sram_addr;
}
/* Returns the SRAM address of the initial digest length for the given
 * hash mode.
 */
ssi_sram_addr_t
ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
{
	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
	ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
#if (DX_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(digest_len_init);
#endif
	default:
		return digest_len_addr; /* returned to avoid a kernel crash */
	}
}