/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
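/*
 * The opad/tmp-keys buffer serves two uses: one full hash block (the
 * HMAC opad block) or, for AES-XCBC-MAC, the three derived 128-bit
 * subkeys K1/K2/K3 stored back to back -- hence the MAX() of the two
 * sizes above.
 */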

struct ssi_hash_handle {
	ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
	struct completion init_comp;
};

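/*
 * Initial ("larval") digest values, listed in the reverse word order
 * the CryptoCell SRAM expects (note sha1_init runs H4..H0).
 * digest_len_init holds the initial hash-length state of one full block
 * (64 or 128 bytes), loaded when resuming an HMAC whose ipad block has
 * already been hashed. md5_init deliberately reuses the SHA1_H*
 * constants: the MD5 IV words are identical to SHA-1's first four
 * initialization words.
 */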
static const u32 digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif

static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size);

struct ssi_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct ssi_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
	struct ssi_drvdata *drvdata;
	/* holds the origin digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
	u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC when the key is larger than the hash block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct completion setkey_comp;
	bool is_hmac;
};
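
/*
 * Note on the layout above: digest_buff and opad_tmp_keys_buff are
 * ____cacheline_aligned because they are mapped for streaming DMA;
 * keeping them on their own cache lines keeps the CPU from dirtying a
 * line the device may be writing. This follows standard DMA-API
 * practice; the exact HW requirement is an assumption, not something
 * documented in this file.
 */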

static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
				      struct ssi_hash_ctx *ctx,
				      unsigned int flow_mode,
				      struct cc_hw_desc desc[],
				      bool is_not_last_data,
				      unsigned int *seq_size);

static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (unlikely((mode == DRV_HASH_MD5) ||
		     (mode == DRV_HASH_SHA384) ||
		     (mode == DRV_HASH_SHA512))) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}
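
/*
 * MD5, SHA-384 and SHA-512 results get an explicit byte swap while the
 * other modes are tagged little-endian, presumably compensating for the
 * engine's default output byte order in these modes (an inference from
 * the mode split here, not a documented HW statement).
 */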

static int ssi_hash_map_result(struct device *dev,
			       struct ahash_req_ctx *state,
			       unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, (void *)state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

static int ssi_hash_map_request(struct device *dev,
				struct ahash_req_ctx *state,
				struct ssi_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc;
	int rc = -ENOMEM;

	state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff0)
		goto fail0;

	state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff1)
		goto fail_buff0;

	state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE,
					    GFP_KERNEL | GFP_DMA);
	if (!state->digest_result_buff)
		goto fail_buff1;

	state->digest_buff = kzalloc(ctx->inter_digestsize,
				     GFP_KERNEL | GFP_DMA);
	if (!state->digest_buff)
		goto fail_digest_result_buff;

	dev_dbg(dev, "Allocated digest-buffer in context state->digest_buff=@%p\n",
		state->digest_buff);
	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len = kzalloc(HASH_LEN_SIZE,
						  GFP_KERNEL | GFP_DMA);
		if (!state->digest_bytes_len)
			goto fail1;

		dev_dbg(dev, "Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n",
			state->digest_bytes_len);
	} else {
		state->digest_bytes_len = NULL;
	}

	state->opad_digest_buff = kzalloc(ctx->inter_digestsize,
					  GFP_KERNEL | GFP_DMA);
	if (!state->opad_digest_buff)
		goto fail2;

	dev_dbg(dev, "Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n",
		state->opad_digest_buff);

	state->digest_buff_dma_addr = dma_map_single(dev,
						     (void *)state->digest_buff,
						     ctx->inter_digestsize,
						     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		goto fail3;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (is_hmac) {
		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
					ctx->inter_digestsize,
					DMA_BIDIRECTIONAL);
		if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) ||
		    (ctx->hw_mode == DRV_CIPHER_CMAC)) {
			memset(state->digest_buff, 0, ctx->inter_digestsize);
		} else { /*sha*/
			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
			if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) ||
				     (ctx->hash_mode == DRV_HASH_SHA384)))
				memcpy(state->digest_bytes_len,
				       digest_len_sha512_init, HASH_LEN_SIZE);
			else
				memcpy(state->digest_bytes_len,
				       digest_len_init, HASH_LEN_SIZE);
#else
			memcpy(state->digest_bytes_len, digest_len_init,
			       HASH_LEN_SIZE);
#endif
		}
		dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
					   ctx->inter_digestsize,
					   DMA_BIDIRECTIONAL);

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. The SRAM contains the
		 * initial digests in the expected order for all SHA*
		 */
		hw_desc_init(&desc);
		set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
		set_dout_dlli(&desc, state->digest_buff_dma_addr,
			      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc, BYPASS);

		rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
		if (unlikely(rc != 0)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto fail4;
		}
	}

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, (void *)state->digest_bytes_len,
				       HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_LEN_SIZE, state->digest_bytes_len);
			goto fail4;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	} else {
		state->digest_bytes_len_dma_addr = 0;
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, (void *)state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto fail5;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	} else {
		state->opad_digest_dma_addr = 0;
	}
	state->buff0_cnt = 0;
	state->buff1_cnt = 0;
	state->buff_index = 0;
	state->mlli_params.curr_pool = NULL;

	return 0;

fail5:
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
fail4:
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}
fail3:
	kfree(state->opad_digest_buff);
fail2:
	kfree(state->digest_bytes_len);
fail1:
	kfree(state->digest_buff);
fail_digest_result_buff:
	kfree(state->digest_result_buff);
	state->digest_result_buff = NULL;
fail_buff1:
	kfree(state->buff1);
	state->buff1 = NULL;
fail_buff0:
	kfree(state->buff0);
	state->buff0 = NULL;
fail0:
	return rc;
}

static void ssi_hash_unmap_request(struct device *dev,
				   struct ahash_req_ctx *state,
				   struct ssi_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr != 0) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}

	kfree(state->opad_digest_buff);
	kfree(state->digest_bytes_len);
	kfree(state->digest_buff);
	kfree(state->digest_result_buff);
	kfree(state->buff1);
	kfree(state->buff0);
}

static void ssi_hash_unmap_result(struct device *dev,
				  struct ahash_req_ctx *state,
				  unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_result_dma_addr,
				 digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void ssi_hash_update_complete(struct device *dev, void *ssi_req,
				     void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req,
				     void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_complete(struct device *dev, void *ssi_req,
			      void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}

static int ssi_hash_digest(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int digestsize,
			   struct scatterlist *src,
			   unsigned int nbytes, u8 *result,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* undo the request mapping so the error path does not leak */
		ssi_hash_unmap_request(dev, state, ctx);
		return -ENOMEM;
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state,
							   src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		ssi_hash_unmap_result(dev, state, digestsize, result);
		ssi_hash_unmap_request(dev, state, ctx);
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_digest_complete;
		ssi_req.user_arg = (void *)async_req;
	}

	/* If HMAC then load hash IPAD xor key; if HASH then load the initial digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
			     NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		if (likely(nbytes != 0))
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      HASH_LEN_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		/* store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash opad xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
					ctx->drvdata, ctx->hash_mode),
			     HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
		ssi_hash_unmap_result(dev, state, digestsize, result);
		ssi_hash_unmap_request(dev, state, ctx);
	}
	return rc;
}
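
/*
 * For orientation, a minimal sketch of how a kernel caller would reach
 * the digest flow above through the generic ahash API. The "sha256"
 * name, the synchronous crypto_wait usage, and the pre-built sg/nbytes
 * scatterlist are illustrative assumptions; this driver registers its
 * own algorithm names elsewhere in the file.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct crypto_wait wait;
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	crypto_init_wait(&wait);
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, out, nbytes);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */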

static int ssi_hash_update(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int block_size,
			   struct scatterlist *src,
			   unsigned int nbytes,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	u32 idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src,
						    nbytes, block_size);
	if (unlikely(rc)) {
		if (rc == 1) {
			dev_dbg(dev, " data size does not require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_update_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;
	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
	}
	return rc;
}

static int ssi_hash_finup(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes,
			  u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state,
							   src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* undo the request mapping so the error path does not leak */
		ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
					ctx->drvdata, ctx->hash_mode),
			     HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}

static int ssi_hash_final(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes,
			  u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state,
							   src, nbytes, 0) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* undo the request mapping so the error path does not leak */
		ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* "DO-PAD" must be enabled only when writing current length to HW */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
					ctx->drvdata, ctx->hash_mode),
			     HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}

static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	state->xcbc_count = 0;

	/* propagate mapping failures instead of silently ignoring them */
	return ssi_hash_map_request(dev, state, ctx);
}

static int ssi_hash_setkey(void *hash,
			   const u8 *key,
			   unsigned int keylen,
			   bool synchronize)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
	digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));

	larval_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);

	/* keylen selects the flow: a zero-length key means plain HASH,
	 * while any non-zero length takes the HMAC path
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;

	if (keylen != 0) {
		ctx->key_params.key_dma_addr = dma_map_single(dev, (void *)key,
							      keylen,
							      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       ctx->key_params.key_dma_addr))) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr + digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
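	/*
	 * Per RFC 2104, two passes below: i == 0 XORs the padded key with
	 * the ipad constant (0x36 repeated) and hashes one block into
	 * digest_buff (the inner state); i == 1 does the same with the
	 * opad constant (0x5c repeated) into opad_tmp_keys (the outer
	 * state). HMAC_IPAD_CONST/HMAC_OPAD_CONST are assumed to hold
	 * those repeated byte patterns.
	 */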
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial
		 * digest of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

out:
	if (rc)
		crypto_ahash_set_flags((struct crypto_ahash *)hash,
				       CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}
	return rc;
}

static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
			   const u8 *key, unsigned int keylen)
{
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key_dma_addr = dma_map_single(dev, (void *)key,
						      keylen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

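	/*
	 * 2. Derive the three XCBC subkeys per RFC 3566: K1/K2/K3 are the
	 * AES encryptions of the constant blocks 0x01..01, 0x02..02 and
	 * 0x03..03 under the key loaded above, each written to its slot
	 * in the opad_tmp_keys buffer.
	 */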
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

	if (rc != 0)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	return rc;
}
1227
1228 #if SSI_CC_HAS_CMAC
1229 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1230                            const u8 *key, unsigned int keylen)
1231 {
1232         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1233         struct device *dev = drvdata_to_dev(ctx->drvdata);
1234
1235         dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1236
1237         ctx->is_hmac = true;
1238
1239         switch (keylen) {
1240         case AES_KEYSIZE_128:
1241         case AES_KEYSIZE_192:
1242         case AES_KEYSIZE_256:
1243                 break;
1244         default:
1245                 return -EINVAL;
1246         }
1247
1248         ctx->key_params.keylen = keylen;
1249
1250         /* Copy the key into the context buffer */
1251
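	/*
	 * Unlike XCBC, CMAC subkey generation is done by the hardware, so
	 * the raw key is simply copied into the context. A 192-bit key is
	 * zero-padded to the maximum AES key size; the key load for this
	 * mode appears to accept only 128- or 256-bit widths (see the
	 * key_size selection in ssi_mac_final()).
	 */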
1252         dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1253                                 keylen, DMA_TO_DEVICE);
1254
1255         memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1256         if (keylen == 24)
1257                 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1258
1259         dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1260                                    keylen, DMA_TO_DEVICE);
1261
1264         return 0;
1265 }
1266 #endif
1267
1268 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1269 {
1270         struct device *dev = drvdata_to_dev(ctx->drvdata);
1271
1272         if (ctx->digest_buff_dma_addr != 0) {
1273                 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1274                                  sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1275                 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1276                         &ctx->digest_buff_dma_addr);
1277                 ctx->digest_buff_dma_addr = 0;
1278         }
1279         if (ctx->opad_tmp_keys_dma_addr != 0) {
1280                 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1281                                  sizeof(ctx->opad_tmp_keys_buff),
1282                                  DMA_BIDIRECTIONAL);
1283                 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1284                         &ctx->opad_tmp_keys_dma_addr);
1285                 ctx->opad_tmp_keys_dma_addr = 0;
1286         }
1287
1288         ctx->key_params.keylen = 0;
1289 }
1290
1291 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1292 {
1293         struct device *dev = drvdata_to_dev(ctx->drvdata);
1294
1295         ctx->key_params.keylen = 0;
1296
1297         ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1298         if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1299                 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1300                         sizeof(ctx->digest_buff), ctx->digest_buff);
1301                 goto fail;
1302         }
1303         dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1304                 sizeof(ctx->digest_buff), ctx->digest_buff,
1305                 &ctx->digest_buff_dma_addr);
1306
1307         ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1308         if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1309                 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1310                         sizeof(ctx->opad_tmp_keys_buff),
1311                         ctx->opad_tmp_keys_buff);
1312                 goto fail;
1313         }
1314         dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1315                 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1316                 &ctx->opad_tmp_keys_dma_addr);
1317
1318         ctx->is_hmac = false;
1319         return 0;
1320
1321 fail:
1322         ssi_hash_free_ctx(ctx);
1323         return -ENOMEM;
1324 }
1325
1326 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1327 {
1328         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1329         struct hash_alg_common *hash_alg_common =
1330                 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1331         struct ahash_alg *ahash_alg =
1332                 container_of(hash_alg_common, struct ahash_alg, halg);
1333         struct ssi_hash_alg *ssi_alg =
1334                         container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1335
1336         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1337                                  sizeof(struct ahash_req_ctx));
1338
1339         ctx->hash_mode = ssi_alg->hash_mode;
1340         ctx->hw_mode = ssi_alg->hw_mode;
1341         ctx->inter_digestsize = ssi_alg->inter_digestsize;
1342         ctx->drvdata = ssi_alg->drvdata;
1343
1344         return ssi_hash_alloc_ctx(ctx);
1345 }
1346
1347 static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1348 {
1349         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1350         struct device *dev = drvdata_to_dev(ctx->drvdata);
1351
1352         dev_dbg(dev, "ssi_hash_cra_exit\n");
1353         ssi_hash_free_ctx(ctx);
1354 }
1355
1356 static int ssi_mac_update(struct ahash_request *req)
1357 {
1358         struct ahash_req_ctx *state = ahash_request_ctx(req);
1359         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1360         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1361         struct device *dev = drvdata_to_dev(ctx->drvdata);
1362         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1363         struct ssi_crypto_req ssi_req = {};
1364         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1365         int rc;
1366         u32 idx = 0;
1367
1368         if (req->nbytes == 0) {
1369                 /* no real updates required */
1370                 return 0;
1371         }
1372
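	/* Count non-empty update calls; ssi_mac_final() and ssi_mac_finup()
	 * use this to distinguish a zero-length message from buffered data.
	 */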
1373         state->xcbc_count++;
1374
1375         rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
1376         if (unlikely(rc)) {
1377                 if (rc == 1) {
1378                         dev_dbg(dev, "data size does not require HW update %x\n",
1379                                 req->nbytes);
1380                         /* No hardware updates are required */
1381                         return 0;
1382                 }
1383                 dev_err(dev, "map_ahash_request_update() failed\n");
1384                 return -ENOMEM;
1385         }
1386
1387         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1388                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1389         else
1390                 ssi_hash_create_cmac_setup(req, desc, &idx);
1391
1392         ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1393
1394         /* store the hash digest result in context */
1395         hw_desc_init(&desc[idx]);
1396         set_cipher_mode(&desc[idx], ctx->hw_mode);
1397         set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1398                       ctx->inter_digestsize, NS_BIT, 1);
1399         set_queue_last_ind(&desc[idx]);
1400         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1401         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1402         idx++;
1403
1404         /* Setup DX request structure */
1405         ssi_req.user_cb = (void *)ssi_hash_update_complete;
1406         ssi_req.user_arg = (void *)req;
1407
1408         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1409         if (unlikely(rc != -EINPROGRESS)) {
1410                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1411                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1412         }
1413         return rc;
1414 }
1415
1416 static int ssi_mac_final(struct ahash_request *req)
1417 {
1418         struct ahash_req_ctx *state = ahash_request_ctx(req);
1419         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1420         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1421         struct device *dev = drvdata_to_dev(ctx->drvdata);
1422         struct ssi_crypto_req ssi_req = {};
1423         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1424         int idx = 0;
1425         int rc = 0;
1426         u32 key_size, key_len;
1427         u32 digestsize = crypto_ahash_digestsize(tfm);
1428
1429         u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1430                         state->buff0_cnt;
1431
1432         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1433                 key_size = CC_AES_128_BIT_KEY_SIZE;
1434                 key_len  = CC_AES_128_BIT_KEY_SIZE;
1435         } else {
1436                 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1437                         ctx->key_params.keylen;
1438                 key_len =  ctx->key_params.keylen;
1439         }
1440
1441         dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1442
1443         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1444                 dev_err(dev, "map_ahash_request_final() failed\n");
1445                 return -ENOMEM;
1446         }
1447
1448         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1449                 dev_err(dev, "map_ahash_digest() failed\n");
1450                 return -ENOMEM;
1451         }
1452
1453         /* Setup DX request structure */
1454         ssi_req.user_cb = (void *)ssi_hash_complete;
1455         ssi_req.user_arg = (void *)req;
1456
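	/*
	 * All data so far was block-aligned, so the stored state is the
	 * encryption of the last full block. Decrypt it back with the MAC
	 * key, allowing the sequence below to reprocess that block as the
	 * final one (with proper XCBC/CMAC last-block handling) by feeding
	 * a zero block.
	 */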
1457         if (state->xcbc_count && (rem_cnt == 0)) {
1458                 /* Load key for ECB decryption */
1459                 hw_desc_init(&desc[idx]);
1460                 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1461                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1462                 set_din_type(&desc[idx], DMA_DLLI,
1463                              (ctx->opad_tmp_keys_dma_addr +
1464                               XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1465                 set_key_size_aes(&desc[idx], key_len);
1466                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1467                 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1468                 idx++;
1469
1470                 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1471                 hw_desc_init(&desc[idx]);
1472                 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1473                              CC_AES_BLOCK_SIZE, NS_BIT);
1474                 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1475                               CC_AES_BLOCK_SIZE, NS_BIT, 0);
1476                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1477                 idx++;
1478
1479                 /* Memory Barrier: wait for axi write to complete */
1480                 hw_desc_init(&desc[idx]);
1481                 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1482                 set_dout_no_dma(&desc[idx], 0, 0, 1);
1483                 idx++;
1484         }
1485
1486         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1487                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1488         else
1489                 ssi_hash_create_cmac_setup(req, desc, &idx);
1490
1491         if (state->xcbc_count == 0) {
1492                 hw_desc_init(&desc[idx]);
1493                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1494                 set_key_size_aes(&desc[idx], key_len);
1495                 set_cmac_size0_mode(&desc[idx]);
1496                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1497                 idx++;
1498         } else if (rem_cnt > 0) {
1499                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1500         } else {
1501                 hw_desc_init(&desc[idx]);
1502                 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1503                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1504                 idx++;
1505         }
1506
1507         /* Get final MAC result */
1508         hw_desc_init(&desc[idx]);
1509         /* TODO */
1510         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1511                       digestsize, NS_BIT, 1);
1512         set_queue_last_ind(&desc[idx]);
1513         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1514         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1515         set_cipher_mode(&desc[idx], ctx->hw_mode);
1516         idx++;
1517
1518         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1519         if (unlikely(rc != -EINPROGRESS)) {
1520                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1521                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1522                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1523         }
1524         return rc;
1525 }
1526
1527 static int ssi_mac_finup(struct ahash_request *req)
1528 {
1529         struct ahash_req_ctx *state = ahash_request_ctx(req);
1530         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1531         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1532         struct device *dev = drvdata_to_dev(ctx->drvdata);
1533         struct ssi_crypto_req ssi_req = {};
1534         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1535         int idx = 0;
1536         int rc = 0;
1537         u32 key_len = 0;
1538         u32 digestsize = crypto_ahash_digestsize(tfm);
1539
1540         dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1541         if (state->xcbc_count > 0 && req->nbytes == 0) {
1542                 dev_dbg(dev, "No data to update. Calling ssi_mac_final\n");
1543                 return ssi_mac_final(req);
1544         }
1545
1546         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1547                 dev_err(dev, "map_ahash_request_final() failed\n");
1548                 return -ENOMEM;
1549         }
1550         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1551                 dev_err(dev, "map_ahash_digest() failed\n");
1552                 return -ENOMEM;
1553         }
1554
1555         /* Setup DX request structure */
1556         ssi_req.user_cb = (void *)ssi_hash_complete;
1557         ssi_req.user_arg = (void *)req;
1558
1559         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1560                 key_len = CC_AES_128_BIT_KEY_SIZE;
1561                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1562         } else {
1563                 key_len = ctx->key_params.keylen;
1564                 ssi_hash_create_cmac_setup(req, desc, &idx);
1565         }
1566
1567         if (req->nbytes == 0) {
1568                 hw_desc_init(&desc[idx]);
1569                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1570                 set_key_size_aes(&desc[idx], key_len);
1571                 set_cmac_size0_mode(&desc[idx]);
1572                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1573                 idx++;
1574         } else {
1575                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1576         }
1577
1578         /* Get final MAC result */
1579         hw_desc_init(&desc[idx]);
1580         /* TODO */
1581         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1582                       digestsize, NS_BIT, 1);
1583         set_queue_last_ind(&desc[idx]);
1584         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1585         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1586         set_cipher_mode(&desc[idx], ctx->hw_mode);
1587         idx++;
1588
1589         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1590         if (unlikely(rc != -EINPROGRESS)) {
1591                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1592                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1593                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1594         }
1595         return rc;
1596 }
1597
1598 static int ssi_mac_digest(struct ahash_request *req)
1599 {
1600         struct ahash_req_ctx *state = ahash_request_ctx(req);
1601         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1602         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1603         struct device *dev = drvdata_to_dev(ctx->drvdata);
1604         u32 digestsize = crypto_ahash_digestsize(tfm);
1605         struct ssi_crypto_req ssi_req = {};
1606         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1607         u32 key_len;
1608         int idx = 0;
1609         int rc;
1610
1611         dev_dbg(dev, "===== mac digest (%d) ====\n", req->nbytes);
1612
1613         if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1614                 dev_err(dev, "map_ahash_source() failed\n");
1615                 return -ENOMEM;
1616         }
1617         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1618                 dev_err(dev, "map_ahash_digest() failed\n");
1619                 return -ENOMEM;
1620         }
1621
1622         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1623                 dev_err(dev, "map_ahash_request_final() failed\n");
1624                 return -ENOMEM;
1625         }
1626
1627         /* Setup DX request structure */
1628         ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1629         ssi_req.user_arg = (void *)req;
1630
1631         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1632                 key_len = CC_AES_128_BIT_KEY_SIZE;
1633                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1634         } else {
1635                 key_len = ctx->key_params.keylen;
1636                 ssi_hash_create_cmac_setup(req, desc, &idx);
1637         }
1638
1639         if (req->nbytes == 0) {
1640                 hw_desc_init(&desc[idx]);
1641                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1642                 set_key_size_aes(&desc[idx], key_len);
1643                 set_cmac_size0_mode(&desc[idx]);
1644                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1645                 idx++;
1646         } else {
1647                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1648         }
1649
1650         /* Get final MAC result */
1651         hw_desc_init(&desc[idx]);
1652         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1653                       CC_AES_BLOCK_SIZE, NS_BIT, 1);
1654         set_queue_last_ind(&desc[idx]);
1655         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1656         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1657         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1658         set_cipher_mode(&desc[idx], ctx->hw_mode);
1659         idx++;
1660
1661         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1662         if (unlikely(rc != -EINPROGRESS)) {
1663                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1664                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1665                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1666                 ssi_hash_unmap_request(dev, state, ctx);
1667         }
1668         return rc;
1669 }
1670
1671 /* ahash wrapper functions */
1672 static int ssi_ahash_digest(struct ahash_request *req)
1673 {
1674         struct ahash_req_ctx *state = ahash_request_ctx(req);
1675         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1676         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1677         u32 digestsize = crypto_ahash_digestsize(tfm);
1678
1679         return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1680 }
1681
1682 static int ssi_ahash_update(struct ahash_request *req)
1683 {
1684         struct ahash_req_ctx *state = ahash_request_ctx(req);
1685         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1686         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1687         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1688
1689         return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1690 }
1691
1692 static int ssi_ahash_finup(struct ahash_request *req)
1693 {
1694         struct ahash_req_ctx *state = ahash_request_ctx(req);
1695         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1696         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1697         u32 digestsize = crypto_ahash_digestsize(tfm);
1698
1699         return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1700 }
1701
1702 static int ssi_ahash_final(struct ahash_request *req)
1703 {
1704         struct ahash_req_ctx *state = ahash_request_ctx(req);
1705         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1706         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1707         u32 digestsize = crypto_ahash_digestsize(tfm);
1708
1709         return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1710 }
1711
1712 static int ssi_ahash_init(struct ahash_request *req)
1713 {
1714         struct ahash_req_ctx *state = ahash_request_ctx(req);
1715         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1716         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1717         struct device *dev = drvdata_to_dev(ctx->drvdata);
1718
1719         dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
1720
1721         return ssi_hash_init(state, ctx);
1722 }
1723
1724 static int ssi_ahash_export(struct ahash_request *req, void *out)
1725 {
1726         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1727         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1728         struct device *dev = drvdata_to_dev(ctx->drvdata);
1729         struct ahash_req_ctx *state = ahash_request_ctx(req);
1730         u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1731         u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1732                                 state->buff0_cnt;
1733         const u32 tmp = CC_EXPORT_MAGIC;
1734
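	/*
	 * Exported state layout (must match ssi_ahash_import() and the
	 * CC_STATE_SIZE() statesize): magic | intermediate digest |
	 * digest length word(s) | current buffer byte count | buffer data.
	 */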
1735         memcpy(out, &tmp, sizeof(u32));
1736         out += sizeof(u32);
1737
1738         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1739                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1740         memcpy(out, state->digest_buff, ctx->inter_digestsize);
1741         out += ctx->inter_digestsize;
1742
1743         if (state->digest_bytes_len_dma_addr) {
1744                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1745                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1746                 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1747         } else {
1748                 /* Poison the unused exported digest len field. */
1749                 memset(out, 0x5F, HASH_LEN_SIZE);
1750         }
1751         out += HASH_LEN_SIZE;
1752
1753         memcpy(out, &curr_buff_cnt, sizeof(u32));
1754         out += sizeof(u32);
1755
1756         memcpy(out, curr_buff, curr_buff_cnt);
1757
1758         /* No sync for device is needed since we did not change
1759          * the data, we only copied it
1760          */
1761
1762         return 0;
1763 }
1764
1765 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1766 {
1767         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1768         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1769         struct device *dev = drvdata_to_dev(ctx->drvdata);
1770         struct ahash_req_ctx *state = ahash_request_ctx(req);
1771         u32 tmp;
1772         int rc = 0;
1773
1774         memcpy(&tmp, in, sizeof(u32));
1775         if (tmp != CC_EXPORT_MAGIC) {
1776                 rc = -EINVAL;
1777                 goto out;
1778         }
1779         in += sizeof(u32);
1780
1781         /* call init() to allocate bufs if the user hasn't */
1782         if (!state->digest_buff) {
1783                 rc = ssi_hash_init(state, ctx);
1784                 if (rc)
1785                         goto out;
1786         }
1787
1788         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1789                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1790         memcpy(state->digest_buff, in, ctx->inter_digestsize);
1791         in += ctx->inter_digestsize;
1792
1793         if (state->digest_bytes_len_dma_addr) {
1794                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1795                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1796                 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
1797         }
1798         in += HASH_LEN_SIZE;
1799
1800         dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1801                                    ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1802
1803         if (state->digest_bytes_len_dma_addr)
1804                 dma_sync_single_for_device(dev,
1805                                            state->digest_bytes_len_dma_addr,
1806                                            HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1807
1808         state->buff_index = 0;
1809
1810         /* Sanity check the data as much as possible */
1811         memcpy(&tmp, in, sizeof(u32));
1812         if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1813                 rc = -EINVAL;
1814                 goto out;
1815         }
1816         in += sizeof(u32);
1817
1818         state->buff0_cnt = tmp;
1819         memcpy(state->buff0, in, state->buff0_cnt);
1820
1821 out:
1822         return rc;
1823 }
1824
1825 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1826                             const u8 *key, unsigned int keylen)
1827 {
1828         return ssi_hash_setkey((void *)ahash, key, keylen, false);
1829 }
1830
1831 struct ssi_hash_template {
1832         char name[CRYPTO_MAX_ALG_NAME];
1833         char driver_name[CRYPTO_MAX_ALG_NAME];
1834         char mac_name[CRYPTO_MAX_ALG_NAME];
1835         char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1836         unsigned int blocksize;
1837         bool synchronize;
1838         struct ahash_alg template_ahash;
1839         int hash_mode;
1840         int hw_mode;
1841         int inter_digestsize;
1842         struct ssi_drvdata *drvdata;
1843 };
1844
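/*
 * Size of the ahash export/import state blob: the intermediate digest,
 * the hash length counter, the partial-block staging buffer, plus two
 * u32 words (the export magic and the staged byte count).
 */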
1845 #define CC_STATE_SIZE(_x) \
1846         ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1847
1848 /* hash descriptors */
1849 static struct ssi_hash_template driver_hash[] = {
1850         /* Asynchronous hash templates */
1851         {
1852                 .name = "sha1",
1853                 .driver_name = "sha1-dx",
1854                 .mac_name = "hmac(sha1)",
1855                 .mac_driver_name = "hmac-sha1-dx",
1856                 .blocksize = SHA1_BLOCK_SIZE,
1857                 .synchronize = false,
1858                 .template_ahash = {
1859                         .init = ssi_ahash_init,
1860                         .update = ssi_ahash_update,
1861                         .final = ssi_ahash_final,
1862                         .finup = ssi_ahash_finup,
1863                         .digest = ssi_ahash_digest,
1864                         .export = ssi_ahash_export,
1865                         .import = ssi_ahash_import,
1866                         .setkey = ssi_ahash_setkey,
1867                         .halg = {
1868                                 .digestsize = SHA1_DIGEST_SIZE,
1869                                 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1870                         },
1871                 },
1872                 .hash_mode = DRV_HASH_SHA1,
1873                 .hw_mode = DRV_HASH_HW_SHA1,
1874                 .inter_digestsize = SHA1_DIGEST_SIZE,
1875         },
1876         {
1877                 .name = "sha256",
1878                 .driver_name = "sha256-dx",
1879                 .mac_name = "hmac(sha256)",
1880                 .mac_driver_name = "hmac-sha256-dx",
1881                 .blocksize = SHA256_BLOCK_SIZE,
1882                 .template_ahash = {
1883                         .init = ssi_ahash_init,
1884                         .update = ssi_ahash_update,
1885                         .final = ssi_ahash_final,
1886                         .finup = ssi_ahash_finup,
1887                         .digest = ssi_ahash_digest,
1888                         .export = ssi_ahash_export,
1889                         .import = ssi_ahash_import,
1890                         .setkey = ssi_ahash_setkey,
1891                         .halg = {
1892                                 .digestsize = SHA256_DIGEST_SIZE,
1893                                 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1894                         },
1895                 },
1896                 .hash_mode = DRV_HASH_SHA256,
1897                 .hw_mode = DRV_HASH_HW_SHA256,
1898                 .inter_digestsize = SHA256_DIGEST_SIZE,
1899         },
1900         {
1901                 .name = "sha224",
1902                 .driver_name = "sha224-dx",
1903                 .mac_name = "hmac(sha224)",
1904                 .mac_driver_name = "hmac-sha224-dx",
1905                 .blocksize = SHA224_BLOCK_SIZE,
1906                 .template_ahash = {
1907                         .init = ssi_ahash_init,
1908                         .update = ssi_ahash_update,
1909                         .final = ssi_ahash_final,
1910                         .finup = ssi_ahash_finup,
1911                         .digest = ssi_ahash_digest,
1912                         .export = ssi_ahash_export,
1913                         .import = ssi_ahash_import,
1914                         .setkey = ssi_ahash_setkey,
1915                         .halg = {
1916                                 .digestsize = SHA224_DIGEST_SIZE,
1917                                 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1918                         },
1919                 },
1920                 .hash_mode = DRV_HASH_SHA224,
1921                 .hw_mode = DRV_HASH_HW_SHA256,
1922                 .inter_digestsize = SHA256_DIGEST_SIZE,
1923         },
1924 #if (DX_DEV_SHA_MAX > 256)
1925         {
1926                 .name = "sha384",
1927                 .driver_name = "sha384-dx",
1928                 .mac_name = "hmac(sha384)",
1929                 .mac_driver_name = "hmac-sha384-dx",
1930                 .blocksize = SHA384_BLOCK_SIZE,
1931                 .template_ahash = {
1932                         .init = ssi_ahash_init,
1933                         .update = ssi_ahash_update,
1934                         .final = ssi_ahash_final,
1935                         .finup = ssi_ahash_finup,
1936                         .digest = ssi_ahash_digest,
1937                         .export = ssi_ahash_export,
1938                         .import = ssi_ahash_import,
1939                         .setkey = ssi_ahash_setkey,
1940                         .halg = {
1941                                 .digestsize = SHA384_DIGEST_SIZE,
1942                                 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1943                         },
1944                 },
1945                 .hash_mode = DRV_HASH_SHA384,
1946                 .hw_mode = DRV_HASH_HW_SHA512,
1947                 .inter_digestsize = SHA512_DIGEST_SIZE,
1948         },
1949         {
1950                 .name = "sha512",
1951                 .driver_name = "sha512-dx",
1952                 .mac_name = "hmac(sha512)",
1953                 .mac_driver_name = "hmac-sha512-dx",
1954                 .blocksize = SHA512_BLOCK_SIZE,
1955                 .template_ahash = {
1956                         .init = ssi_ahash_init,
1957                         .update = ssi_ahash_update,
1958                         .final = ssi_ahash_final,
1959                         .finup = ssi_ahash_finup,
1960                         .digest = ssi_ahash_digest,
1961                         .export = ssi_ahash_export,
1962                         .import = ssi_ahash_import,
1963                         .setkey = ssi_ahash_setkey,
1964                         .halg = {
1965                                 .digestsize = SHA512_DIGEST_SIZE,
1966                                 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1967                         },
1968                 },
1969                 .hash_mode = DRV_HASH_SHA512,
1970                 .hw_mode = DRV_HASH_HW_SHA512,
1971                 .inter_digestsize = SHA512_DIGEST_SIZE,
1972         },
1973 #endif
1974         {
1975                 .name = "md5",
1976                 .driver_name = "md5-dx",
1977                 .mac_name = "hmac(md5)",
1978                 .mac_driver_name = "hmac-md5-dx",
1979                 .blocksize = MD5_HMAC_BLOCK_SIZE,
1980                 .template_ahash = {
1981                         .init = ssi_ahash_init,
1982                         .update = ssi_ahash_update,
1983                         .final = ssi_ahash_final,
1984                         .finup = ssi_ahash_finup,
1985                         .digest = ssi_ahash_digest,
1986                         .export = ssi_ahash_export,
1987                         .import = ssi_ahash_import,
1988                         .setkey = ssi_ahash_setkey,
1989                         .halg = {
1990                                 .digestsize = MD5_DIGEST_SIZE,
1991                                 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1992                         },
1993                 },
1994                 .hash_mode = DRV_HASH_MD5,
1995                 .hw_mode = DRV_HASH_HW_MD5,
1996                 .inter_digestsize = MD5_DIGEST_SIZE,
1997         },
1998         {
1999                 .mac_name = "xcbc(aes)",
2000                 .mac_driver_name = "xcbc-aes-dx",
2001                 .blocksize = AES_BLOCK_SIZE,
2002                 .template_ahash = {
2003                         .init = ssi_ahash_init,
2004                         .update = ssi_mac_update,
2005                         .final = ssi_mac_final,
2006                         .finup = ssi_mac_finup,
2007                         .digest = ssi_mac_digest,
2008                         .setkey = ssi_xcbc_setkey,
2009                         .export = ssi_ahash_export,
2010                         .import = ssi_ahash_import,
2011                         .halg = {
2012                                 .digestsize = AES_BLOCK_SIZE,
2013                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2014                         },
2015                 },
2016                 .hash_mode = DRV_HASH_NULL,
2017                 .hw_mode = DRV_CIPHER_XCBC_MAC,
2018                 .inter_digestsize = AES_BLOCK_SIZE,
2019         },
2020 #if SSI_CC_HAS_CMAC
2021         {
2022                 .mac_name = "cmac(aes)",
2023                 .mac_driver_name = "cmac-aes-dx",
2024                 .blocksize = AES_BLOCK_SIZE,
2025                 .template_ahash = {
2026                         .init = ssi_ahash_init,
2027                         .update = ssi_mac_update,
2028                         .final = ssi_mac_final,
2029                         .finup = ssi_mac_finup,
2030                         .digest = ssi_mac_digest,
2031                         .setkey = ssi_cmac_setkey,
2032                         .export = ssi_ahash_export,
2033                         .import = ssi_ahash_import,
2034                         .halg = {
2035                                 .digestsize = AES_BLOCK_SIZE,
2036                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2037                         },
2038                 },
2039                 .hash_mode = DRV_HASH_NULL,
2040                 .hw_mode = DRV_CIPHER_CMAC,
2041                 .inter_digestsize = AES_BLOCK_SIZE,
2042         },
2043 #endif
2045 };
2046
2047 static struct ssi_hash_alg *
2048 ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
2049                     bool keyed)
2050 {
2051         struct ssi_hash_alg *t_crypto_alg;
2052         struct crypto_alg *alg;
2053         struct ahash_alg *halg;
2054
2055         t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2056         if (!t_crypto_alg)
2057                 return ERR_PTR(-ENOMEM);
2058
2060         t_crypto_alg->ahash_alg = template->template_ahash;
2061         halg = &t_crypto_alg->ahash_alg;
2062         alg = &halg->halg.base;
2063
2064         if (keyed) {
2065                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2066                          template->mac_name);
2067                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2068                          template->mac_driver_name);
2069         } else {
2070                 halg->setkey = NULL;
2071                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2072                          template->name);
2073                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2074                          template->driver_name);
2075         }
2076         alg->cra_module = THIS_MODULE;
2077         alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2078         alg->cra_priority = SSI_CRA_PRIO;
2079         alg->cra_blocksize = template->blocksize;
2080         alg->cra_alignmask = 0;
2081         alg->cra_exit = ssi_hash_cra_exit;
2082
2083         alg->cra_init = ssi_ahash_cra_init;
2084         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2085                         CRYPTO_ALG_KERN_DRIVER_ONLY;
2086         alg->cra_type = &crypto_ahash_type;
2087
2088         t_crypto_alg->hash_mode = template->hash_mode;
2089         t_crypto_alg->hw_mode = template->hw_mode;
2090         t_crypto_alg->inter_digestsize = template->inter_digestsize;
2091
2092         return t_crypto_alg;
2093 }
2094
2095 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2096 {
2097         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2098         ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2099         unsigned int larval_seq_len = 0;
2100         struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2101         struct device *dev = drvdata_to_dev(drvdata);
2102         int rc = 0;
2103 #if (DX_DEV_SHA_MAX > 256)
2104         int i;
2105 #endif
2106
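	/*
	 * SRAM layout written below (and relied upon by the
	 * ssi_ahash_get_*_sram_addr() helpers): digest-len constants
	 * [, sha512 digest-len constants], then the larval digests in the
	 * order MD5, SHA1, SHA224, SHA256 [, SHA384, SHA512].
	 */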
2107         /* Copy-to-sram digest-len */
2108         ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
2109                                      ARRAY_SIZE(digest_len_init),
2110                                      larval_seq, &larval_seq_len);
2111         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2112         if (unlikely(rc != 0))
2113                 goto init_digest_const_err;
2114
2115         sram_buff_ofs += sizeof(digest_len_init);
2116         larval_seq_len = 0;
2117
2118 #if (DX_DEV_SHA_MAX > 256)
2119         /* Copy-to-sram digest-len for sha384/512 */
2120         ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
2121                                      ARRAY_SIZE(digest_len_sha512_init),
2122                                      larval_seq, &larval_seq_len);
2123         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2124         if (unlikely(rc != 0))
2125                 goto init_digest_const_err;
2126
2127         sram_buff_ofs += sizeof(digest_len_sha512_init);
2128         larval_seq_len = 0;
2129 #endif
2130
2131         /* The initial digests offset */
2132         hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2133
2134         /* Copy-to-sram initial SHA* digests */
2135         ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
2136                                      ARRAY_SIZE(md5_init), larval_seq,
2137                                      &larval_seq_len);
2138         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2139         if (unlikely(rc != 0))
2140                 goto init_digest_const_err;
2141         sram_buff_ofs += sizeof(md5_init);
2142         larval_seq_len = 0;
2143
2144         ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
2145                                      ARRAY_SIZE(sha1_init), larval_seq,
2146                                      &larval_seq_len);
2147         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2148         if (unlikely(rc != 0))
2149                 goto init_digest_const_err;
2150         sram_buff_ofs += sizeof(sha1_init);
2151         larval_seq_len = 0;
2152
2153         ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
2154                                      ARRAY_SIZE(sha224_init), larval_seq,
2155                                      &larval_seq_len);
2156         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2157         if (unlikely(rc != 0))
2158                 goto init_digest_const_err;
2159         sram_buff_ofs += sizeof(sha224_init);
2160         larval_seq_len = 0;
2161
2162         ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
2163                                      ARRAY_SIZE(sha256_init), larval_seq,
2164                                      &larval_seq_len);
2165         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2166         if (unlikely(rc != 0))
2167                 goto init_digest_const_err;
2168         sram_buff_ofs += sizeof(sha256_init);
2169         larval_seq_len = 0;
2170
2171 #if (DX_DEV_SHA_MAX > 256)
2172         /* Each 64-bit larval digest word must be copied to SRAM with its 32-bit halves swapped */
2173         for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2174                 const u32 const0 = ((u32 *)&sha384_init[i])[1];
2175                 const u32 const1 = ((u32 *)&sha384_init[i])[0];
2176
2177                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2178                                              larval_seq, &larval_seq_len);
2179                 sram_buff_ofs += sizeof(u32);
2180                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2181                                              larval_seq, &larval_seq_len);
2182                 sram_buff_ofs += sizeof(u32);
2183         }
2184         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2185         if (unlikely(rc != 0)) {
2186                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2187                 goto init_digest_const_err;
2188         }
2189         larval_seq_len = 0;
2190
2191         for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2192                 const u32 const0 = ((u32 *)&sha512_init[i])[1];
2193                 const u32 const1 = ((u32 *)&sha512_init[i])[0];
2194
2195                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2196                                              larval_seq, &larval_seq_len);
2197                 sram_buff_ofs += sizeof(u32);
2198                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2199                                              larval_seq, &larval_seq_len);
2200                 sram_buff_ofs += sizeof(u32);
2201         }
2202         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2203         if (unlikely(rc != 0)) {
2204                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2205                 goto init_digest_const_err;
2206         }
2207 #endif
2208
2209 init_digest_const_err:
2210         return rc;
2211 }
2212
2213 int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2214 {
2215         struct ssi_hash_handle *hash_handle;
2216         ssi_sram_addr_t sram_buff;
2217         u32 sram_size_to_alloc;
2218         struct device *dev = drvdata_to_dev(drvdata);
2219         int rc = 0;
2220         int alg;
2221
2222         hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2223         if (!hash_handle)
2224                 return -ENOMEM;
2225
2226         INIT_LIST_HEAD(&hash_handle->hash_list);
2227         drvdata->hash_handle = hash_handle;
2228
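	/* Reserve SRAM for every constant written by
	 * ssi_hash_init_sram_digest_consts() below.
	 */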
2229         sram_size_to_alloc = sizeof(digest_len_init) +
2230 #if (DX_DEV_SHA_MAX > 256)
2231                         sizeof(digest_len_sha512_init) +
2232                         sizeof(sha384_init) +
2233                         sizeof(sha512_init) +
2234 #endif
2235                         sizeof(md5_init) +
2236                         sizeof(sha1_init) +
2237                         sizeof(sha224_init) +
2238                         sizeof(sha256_init);
2239
2240         sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
2241         if (sram_buff == NULL_SRAM_ADDR) {
2242                 dev_err(dev, "SRAM pool exhausted\n");
2243                 rc = -ENOMEM;
2244                 goto fail;
2245         }
2246
2247         /* The initial digest-len offset */
2248         hash_handle->digest_len_sram_addr = sram_buff;
2249
2250         /* Must be set before the alg registration as it is used there */
2251         rc = ssi_hash_init_sram_digest_consts(drvdata);
2252         if (unlikely(rc != 0)) {
2253                 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2254                 goto fail;
2255         }
2256
2257         /* ahash registration: a keyed (HMAC/MAC) instance, plus a plain one for non-MAC algs */
2258         for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2259                 struct ssi_hash_alg *t_alg;
2260                 int hw_mode = driver_hash[alg].hw_mode;
2261
2262                 /* register hmac version */
2263                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
2264                 if (IS_ERR(t_alg)) {
2265                         rc = PTR_ERR(t_alg);
2266                         dev_err(dev, "%s alg allocation failed\n",
2267                                 driver_hash[alg].driver_name);
2268                         goto fail;
2269                 }
2270                 t_alg->drvdata = drvdata;
2271
2272                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2273                 if (unlikely(rc)) {
2274                         dev_err(dev, "%s alg registration failed\n",
2275                                 driver_hash[alg].driver_name);
2276                         kfree(t_alg);
2277                         goto fail;
2278                 }
2279                 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2282
2283                 if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
2284                     (hw_mode == DRV_CIPHER_CMAC))
2285                         continue;
2286
2287                 /* register hash version */
2288                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
2289                 if (IS_ERR(t_alg)) {
2290                         rc = PTR_ERR(t_alg);
2291                         dev_err(dev, "%s alg allocation failed\n",
2292                                 driver_hash[alg].driver_name);
2293                         goto fail;
2294                 }
2295                 t_alg->drvdata = drvdata;
2296
2297                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2298                 if (unlikely(rc)) {
2299                         dev_err(dev, "%s alg registration failed\n",
2300                                 driver_hash[alg].driver_name);
2301                         kfree(t_alg);
2302                         goto fail;
2303                 }
2304                 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2306         }
2307
2308         return 0;
2309
2310 fail:
2311         kfree(drvdata->hash_handle);
2312         drvdata->hash_handle = NULL;
2313         return rc;
2314 }
2315
2316 int ssi_hash_free(struct ssi_drvdata *drvdata)
2317 {
2318         struct ssi_hash_alg *t_hash_alg, *hash_n;
2319         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2320
2321         if (hash_handle) {
2322                 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2323                         crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2324                         list_del(&t_hash_alg->entry);
2325                         kfree(t_hash_alg);
2326                 }
2327
2328                 kfree(hash_handle);
2329                 drvdata->hash_handle = NULL;
2330         }
2331         return 0;
2332 }
2333
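/*
 * Load the XCBC subkeys into the AES engine (K1 as the cipher key, K2 and
 * K3 into state registers 1 and 2), followed by the running MAC state.
 */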
2334 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2335                                        struct cc_hw_desc desc[],
2336                                        unsigned int *seq_size)
2337 {
2338         unsigned int idx = *seq_size;
2339         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2340         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2341         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2342
2343         /* Setup XCBC MAC K1 */
2344         hw_desc_init(&desc[idx]);
2345         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2346                                             XCBC_MAC_K1_OFFSET),
2347                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2348         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2349         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2350         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2351         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2352         set_flow_mode(&desc[idx], S_DIN_to_AES);
2353         idx++;
2354
2355         /* Setup XCBC MAC K2 */
2356         hw_desc_init(&desc[idx]);
2357         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2358                                             XCBC_MAC_K2_OFFSET),
2359                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2360         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2361         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2362         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2363         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2364         set_flow_mode(&desc[idx], S_DIN_to_AES);
2365         idx++;
2366
2367         /* Setup XCBC MAC K3 */
2368         hw_desc_init(&desc[idx]);
2369         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2370                                             XCBC_MAC_K3_OFFSET),
2371                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2372         set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2373         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2374         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2375         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2376         set_flow_mode(&desc[idx], S_DIN_to_AES);
2377         idx++;
2378
2379         /* Loading MAC state */
2380         hw_desc_init(&desc[idx]);
2381         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2382                      CC_AES_BLOCK_SIZE, NS_BIT);
2383         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2384         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2385         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2386         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2387         set_flow_mode(&desc[idx], S_DIN_to_AES);
2388         idx++;
2389         *seq_size = idx;
2390 }
2391
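/* Load the CMAC key and the running MAC state into the AES engine. */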
2392 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2393                                        struct cc_hw_desc desc[],
2394                                        unsigned int *seq_size)
2395 {
2396         unsigned int idx = *seq_size;
2397         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2398         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2399         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2400
2401         /* Setup CMAC Key */
2402         hw_desc_init(&desc[idx]);
2403         set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2404                      ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2405                       ctx->key_params.keylen), NS_BIT);
2406         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2407         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2408         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2409         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2410         set_flow_mode(&desc[idx], S_DIN_to_AES);
2411         idx++;
2412
2413         /* Load MAC state */
2414         hw_desc_init(&desc[idx]);
2415         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2416                      CC_AES_BLOCK_SIZE, NS_BIT);
2417         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2418         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2419         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2420         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2421         set_flow_mode(&desc[idx], S_DIN_to_AES);
2422         idx++;
2423         *seq_size = idx;
2424 }
2425
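/*
 * Build the data-flow descriptor(s) for the request: a single DLLI
 * descriptor when the data is DMA-contiguous, or a BYPASS copy of the
 * MLLI table into SRAM followed by an MLLI-sourced processing descriptor
 * when it is scattered.
 */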
2426 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2427                                       struct ssi_hash_ctx *ctx,
2428                                       unsigned int flow_mode,
2429                                       struct cc_hw_desc desc[],
2430                                       bool is_not_last_data,
2431                                       unsigned int *seq_size)
2432 {
2433         unsigned int idx = *seq_size;
2434         struct device *dev = drvdata_to_dev(ctx->drvdata);
2435
2436         if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2437                 hw_desc_init(&desc[idx]);
2438                 set_din_type(&desc[idx], DMA_DLLI,
2439                              sg_dma_address(areq_ctx->curr_sg),
2440                              areq_ctx->curr_sg->length, NS_BIT);
2441                 set_flow_mode(&desc[idx], flow_mode);
2442                 idx++;
2443         } else {
2444                 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2445                         dev_dbg(dev, " NULL mode\n");
2446                         /* nothing to build */
2447                         return;
2448                 }
2449                 /* bypass */
2450                 hw_desc_init(&desc[idx]);
2451                 set_din_type(&desc[idx], DMA_DLLI,
2452                              areq_ctx->mlli_params.mlli_dma_addr,
2453                              areq_ctx->mlli_params.mlli_len, NS_BIT);
2454                 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2455                               areq_ctx->mlli_params.mlli_len);
2456                 set_flow_mode(&desc[idx], BYPASS);
2457                 idx++;
2458                 /* process */
2459                 hw_desc_init(&desc[idx]);
2460                 set_din_type(&desc[idx], DMA_MLLI,
2461                              ctx->drvdata->mlli_sram_addr,
2462                              areq_ctx->mlli_nents, NS_BIT);
2463                 set_flow_mode(&desc[idx], flow_mode);
2464                 idx++;
2465         }
2466         if (is_not_last_data)
2467                 set_din_not_last_indication(&desc[(idx - 1)]);
2468         /* return updated desc sequence size */
2469         *seq_size = idx;
2470 }
2471
2472 /*!
2473  * Gets the address of the initial digest in SRAM
2474  * according to the given hash mode
2475  *
2476  * \param drvdata The crypto driver private context
2477  * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256,
2478  *             and SHA384/SHA512 when DX_DEV_SHA_MAX > 256
2479  * \return ssi_sram_addr_t The address of the initial digest in SRAM
2480  */
2481 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2482 {
2483         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2484         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2485         struct device *dev = drvdata_to_dev(_drvdata);
2486
2487         switch (mode) {
2488         case DRV_HASH_NULL:
2489                 break; /* Ignore */
2490         case DRV_HASH_MD5:
2491                 return (hash_handle->larval_digest_sram_addr);
2492         case DRV_HASH_SHA1:
2493                 return (hash_handle->larval_digest_sram_addr +
2494                         sizeof(md5_init));
2495         case DRV_HASH_SHA224:
2496                 return (hash_handle->larval_digest_sram_addr +
2497                         sizeof(md5_init) +
2498                         sizeof(sha1_init));
2499         case DRV_HASH_SHA256:
2500                 return (hash_handle->larval_digest_sram_addr +
2501                         sizeof(md5_init) +
2502                         sizeof(sha1_init) +
2503                         sizeof(sha224_init));
2504 #if (DX_DEV_SHA_MAX > 256)
2505         case DRV_HASH_SHA384:
2506                 return (hash_handle->larval_digest_sram_addr +
2507                         sizeof(md5_init) +
2508                         sizeof(sha1_init) +
2509                         sizeof(sha224_init) +
2510                         sizeof(sha256_init));
2511         case DRV_HASH_SHA512:
2512                 return (hash_handle->larval_digest_sram_addr +
2513                         sizeof(md5_init) +
2514                         sizeof(sha1_init) +
2515                         sizeof(sha224_init) +
2516                         sizeof(sha256_init) +
2517                         sizeof(sha384_init));
2518 #endif
2519         default:
2520                 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2521         }
2522
2523         /* Return a valid (though incorrect) address to avoid a kernel crash */
2524         return hash_handle->larval_digest_sram_addr;
2525 }
2526
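/*
 * Gets the address in SRAM of the initial digest-length constant for the
 * given hash mode; SHA384/SHA512 use a separate (larger) constant.
 */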
2527 ssi_sram_addr_t
2528 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2529 {
2530         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2531         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2532         ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2533
2534         switch (mode) {
2535         case DRV_HASH_SHA1:
2536         case DRV_HASH_SHA224:
2537         case DRV_HASH_SHA256:
2538         case DRV_HASH_MD5:
2539                 return digest_len_addr;
2540 #if (DX_DEV_SHA_MAX > 256)
2541         case DRV_HASH_SHA384:
2542         case DRV_HASH_SHA512:
2543                 return  digest_len_addr + sizeof(digest_len_init);
2544 #endif
2545         default:
2546                 return digest_len_addr; /* to avoid a kernel crash */
2547         }
2548 }
2549