// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"
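
/*
 * The buffer manager gathers all the DMA buffers of a request into a
 * buffer_array and, when more than a single contiguous entry is needed,
 * renders them into an MLLI (multi linked-list item) table that the
 * CryptoCell DMA engine can follow.
 */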
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = req->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}
	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * cc_zero_sgl() - Zero scatter list data.
 *
 * @sgl: SG list to zero
 * @data_len: number of bytes to zero
 */
void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: device object
 * @dest: pointer to the copy buffer
 * @sg: scatterlist to copy from/to
 * @to_skip: copy start offset within the SG list
 * @end: copy end offset within the SG list
 * @direct: CC_SG_TO_BUF copies from the SG list into dest,
 *          CC_SG_FROM_BUF copies from dest into the SG list
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
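
/*
 * Render a single contiguous DMA buffer into MLLI entries, splitting it
 * into chunks of at most CC_MAX_MLLI_ENTRY_SIZE bytes. *curr_nents and
 * *mlli_entry_pp are advanced past the entries that were written.
 */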
143 static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
144 u32 buff_size, u32 *curr_nents,
147 u32 *mlli_entry_p = *mlli_entry_pp;
150 /* Verify there is no memory overflow*/
151 new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
152 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
153 dev_err(dev, "Too many mlli entries. current %d max %d\n",
154 new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
158 /*handle buffer longer than 64 kbytes */
159 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
160 cc_lli_set_addr(mlli_entry_p, buff_dma);
161 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
162 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
163 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
164 mlli_entry_p[LLI_WORD1_OFFSET]);
165 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
166 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
167 mlli_entry_p = mlli_entry_p + 2;
171 cc_lli_set_addr(mlli_entry_p, buff_dma);
172 cc_lli_set_size(mlli_entry_p, buff_size);
173 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
174 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
175 mlli_entry_p[LLI_WORD1_OFFSET]);
176 mlli_entry_p = mlli_entry_p + 2;
177 *mlli_entry_pp = mlli_entry_p;
182 static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
183 u32 sgl_data_len, u32 sgl_offset,
184 u32 *curr_nents, u32 **mlli_entry_pp)
186 struct scatterlist *curr_sgl = sgl;
187 u32 *mlli_entry_p = *mlli_entry_pp;
190 for ( ; (curr_sgl && sgl_data_len);
191 curr_sgl = sg_next(curr_sgl)) {
193 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
194 sg_dma_len(curr_sgl) - sgl_offset :
196 sgl_data_len -= entry_data_len;
197 rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
198 sgl_offset, entry_data_len,
199 curr_nents, &mlli_entry_p);
205 *mlli_entry_pp = mlli_entry_p;
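
/*
 * Allocate an MLLI table from the DMA pool pointed to by mlli_params and
 * fill it with entries for every buffer collected in sg_data. On return
 * mlli_params holds the table's virtual address, DMA address and length.
 */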
209 static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
210 struct mlli_params *mlli_params, gfp_t flags)
213 u32 total_nents = 0, prev_total_nents = 0;
216 dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
218 /* Allocate memory from the pointed pool */
219 mlli_params->mlli_virt_addr =
220 dma_pool_alloc(mlli_params->curr_pool, flags,
221 &mlli_params->mlli_dma_addr);
222 if (!mlli_params->mlli_virt_addr) {
223 dev_err(dev, "dma_pool_alloc() failed\n");
225 goto build_mlli_exit;
227 /* Point to start of MLLI */
228 mlli_p = (u32 *)mlli_params->mlli_virt_addr;
229 /* go over all SG's and link it to one MLLI table */
230 for (i = 0; i < sg_data->num_of_buffers; i++) {
231 union buffer_array_entry *entry = &sg_data->entry[i];
232 u32 tot_len = sg_data->total_data_len[i];
233 u32 offset = sg_data->offset[i];
235 if (sg_data->type[i] == DMA_SGL_TYPE)
236 rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
237 offset, &total_nents,
239 else /*DMA_BUFF_TYPE*/
240 rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
241 tot_len, &total_nents,
246 /* set last bit in the current table */
247 if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
251 *sg_data->mlli_nents[i] +=
252 (total_nents - prev_total_nents);
253 prev_total_nents = total_nents;
257 /* Set MLLI size for the bypass operation */
258 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
260 dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
261 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
262 mlli_params->mlli_len);
268 static void cc_add_buffer_entry(struct device *dev,
269 struct buffer_array *sgl_data,
270 dma_addr_t buffer_dma, unsigned int buffer_len,
271 bool is_last_entry, u32 *mlli_nents)
273 unsigned int index = sgl_data->num_of_buffers;
275 dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
276 index, &buffer_dma, buffer_len, is_last_entry);
277 sgl_data->nents[index] = 1;
278 sgl_data->entry[index].buffer_dma = buffer_dma;
279 sgl_data->offset[index] = 0;
280 sgl_data->total_data_len[index] = buffer_len;
281 sgl_data->type[index] = DMA_BUFF_TYPE;
282 sgl_data->is_last[index] = is_last_entry;
283 sgl_data->mlli_nents[index] = mlli_nents;
284 if (sgl_data->mlli_nents[index])
285 *sgl_data->mlli_nents[index] = 0;
286 sgl_data->num_of_buffers++;
289 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
290 unsigned int nents, struct scatterlist *sgl,
291 unsigned int data_len, unsigned int data_offset,
292 bool is_last_table, u32 *mlli_nents)
294 unsigned int index = sgl_data->num_of_buffers;
296 dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
297 index, nents, sgl, data_len, is_last_table);
298 sgl_data->nents[index] = nents;
299 sgl_data->entry[index].sgl = sgl;
300 sgl_data->offset[index] = data_offset;
301 sgl_data->total_data_len[index] = data_len;
302 sgl_data->type[index] = DMA_SGL_TYPE;
303 sgl_data->is_last[index] = is_last_table;
304 sgl_data->mlli_nents[index] = mlli_nents;
305 if (sgl_data->mlli_nents[index])
306 *sgl_data->mlli_nents[index] = 0;
307 sgl_data->num_of_buffers++;
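
/*
 * DMA-map a scatterlist of nbytes. A single-entry list is mapped as-is
 * (DLLI case); otherwise the number of entries is computed and checked
 * against max_sg_nents before mapping. *lbytes returns the number of
 * bytes in the last entry and *mapped_nents the mapped entry count.
 */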
310 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
311 unsigned int nbytes, int direction, u32 *nents,
312 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
314 if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
316 if (dma_map_sg(dev, sg, 1, direction) != 1) {
317 dev_err(dev, "dma_map_sg() single buffer failed\n");
320 dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
321 &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
322 sg->offset, sg->length);
	} else { /* !sg_is_last() - more than one entry */
327 *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
328 if (*nents > max_sg_nents) {
330 dev_err(dev, "Too many fragments. current %d max %d\n",
331 *nents, max_sg_nents);
		/* In case of an IOMMU the number of mapped nents may
		 * differ from the original sgl nents
		 */
337 *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
338 if (*mapped_nents == 0) {
340 dev_err(dev, "dma_map_sg() sg buffer failed\n");
349 cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
350 u8 *config_data, struct buffer_array *sg_data,
351 unsigned int assoclen)
353 dev_dbg(dev, " handle additional data config set to DLLI\n");
354 /* create sg for the current buffer */
355 sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
356 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
357 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
358 dev_err(dev, "dma_map_sg() config buffer failed\n");
361 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
362 &sg_dma_address(&areq_ctx->ccm_adata_sg),
363 sg_page(&areq_ctx->ccm_adata_sg),
364 sg_virt(&areq_ctx->ccm_adata_sg),
365 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
366 /* prepare for case of MLLI */
368 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
369 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
375 static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
376 u8 *curr_buff, u32 curr_buff_cnt,
377 struct buffer_array *sg_data)
379 dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
380 /* create sg for the current buffer */
381 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
382 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
383 dev_err(dev, "dma_map_sg() src buffer failed\n");
386 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
387 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
388 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
389 areq_ctx->buff_sg->length);
390 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
391 areq_ctx->curr_sg = areq_ctx->buff_sg;
392 areq_ctx->in_nents = 0;
393 /* prepare for case of MLLI */
394 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
399 void cc_unmap_cipher_request(struct device *dev, void *ctx,
400 unsigned int ivsize, struct scatterlist *src,
401 struct scatterlist *dst)
403 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
405 if (req_ctx->gen_ctx.iv_dma_addr) {
406 dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
407 &req_ctx->gen_ctx.iv_dma_addr, ivsize);
408 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
409 ivsize, DMA_BIDIRECTIONAL);
412 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
413 req_ctx->mlli_params.mlli_virt_addr) {
414 dma_pool_free(req_ctx->mlli_params.curr_pool,
415 req_ctx->mlli_params.mlli_virt_addr,
416 req_ctx->mlli_params.mlli_dma_addr);
419 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
420 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
423 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
424 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
428 int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
429 unsigned int ivsize, unsigned int nbytes,
430 void *info, struct scatterlist *src,
431 struct scatterlist *dst, gfp_t flags)
433 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
434 struct mlli_params *mlli_params = &req_ctx->mlli_params;
435 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
436 struct device *dev = drvdata_to_dev(drvdata);
437 struct buffer_array sg_data;
440 u32 mapped_nents = 0;
442 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
443 mlli_params->curr_pool = NULL;
444 sg_data.num_of_buffers = 0;
448 dump_byte_array("iv", (u8 *)info, ivsize);
449 req_ctx->gen_ctx.iv_dma_addr =
450 dma_map_single(dev, (void *)info,
451 ivsize, DMA_BIDIRECTIONAL);
452 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
453 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
457 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
458 ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
460 req_ctx->gen_ctx.iv_dma_addr = 0;
463 /* Map the src SGL */
464 rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
465 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
468 if (mapped_nents > 1)
469 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
472 /* Handle inplace operation */
473 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
474 req_ctx->out_nents = 0;
475 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
477 &req_ctx->in_mlli_nents);
481 rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
482 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
483 &dummy, &mapped_nents);
486 if (mapped_nents > 1)
487 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
489 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
490 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
492 &req_ctx->in_mlli_nents);
493 cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
495 &req_ctx->out_mlli_nents);
499 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
500 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
501 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
506 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
507 cc_dma_buf_type(req_ctx->dma_buf_type));
512 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
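
/*
 * Undo all the DMA mappings taken by cc_map_aead_request(): the MAC and
 * GCM/CCM helper blocks, the IV, the MLLI table (if one was allocated)
 * and the source/destination scatterlists.
 */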
516 void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
518 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
519 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
520 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
521 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
522 u32 size_to_unmap = 0;
524 if (areq_ctx->mac_buf_dma_addr) {
525 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
526 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
529 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
530 if (areq_ctx->hkey_dma_addr) {
531 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
532 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
535 if (areq_ctx->gcm_block_len_dma_addr) {
536 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
537 AES_BLOCK_SIZE, DMA_TO_DEVICE);
540 if (areq_ctx->gcm_iv_inc1_dma_addr) {
541 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
542 AES_BLOCK_SIZE, DMA_TO_DEVICE);
545 if (areq_ctx->gcm_iv_inc2_dma_addr) {
546 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
547 AES_BLOCK_SIZE, DMA_TO_DEVICE);
551 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
552 if (areq_ctx->ccm_iv0_dma_addr) {
553 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
554 AES_BLOCK_SIZE, DMA_TO_DEVICE);
557 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
559 if (areq_ctx->gen_ctx.iv_dma_addr) {
560 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
561 hw_iv_size, DMA_BIDIRECTIONAL);
565 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
566 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
567 (areq_ctx->mlli_params.mlli_virt_addr)) {
568 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
569 &areq_ctx->mlli_params.mlli_dma_addr,
570 areq_ctx->mlli_params.mlli_virt_addr);
571 dma_pool_free(areq_ctx->mlli_params.curr_pool,
572 areq_ctx->mlli_params.mlli_virt_addr,
573 areq_ctx->mlli_params.mlli_dma_addr);
576 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
577 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
578 req->assoclen, req->cryptlen);
579 size_to_unmap = req->assoclen + req->cryptlen;
580 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
581 size_to_unmap += areq_ctx->req_authsize;
582 if (areq_ctx->is_gcm4543)
583 size_to_unmap += crypto_aead_ivsize(tfm);
585 dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, size_to_unmap),
587 if (req->src != req->dst) {
588 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
590 dma_unmap_sg(dev, req->dst,
591 sg_nents_for_len(req->dst, size_to_unmap),
594 if (drvdata->coherent &&
595 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
596 req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overwrite that may be caused by cache coherence
		 * problem.
		 */
601 cc_copy_mac(dev, req, CC_SG_FROM_BUF);
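
/*
 * Work out how many scatterlist entries hold the ICV and whether it is
 * fragmented across entries, based on the amount of data in the last
 * entry. Returns the number of ICV entries, or -1 when the ICV spans
 * more fragments than supported.
 */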
605 static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
606 unsigned int sgl_nents, unsigned int authsize,
607 u32 last_entry_data_size,
608 bool *is_icv_fragmented)
610 unsigned int icv_max_size = 0;
611 unsigned int icv_required_size = authsize > last_entry_data_size ?
612 (authsize - last_entry_data_size) :
617 if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
618 *is_icv_fragmented = false;
622 for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
629 icv_max_size = sgl->length;
631 if (last_entry_data_size > authsize) {
632 /* ICV attached to data in last entry (not fragmented!) */
634 *is_icv_fragmented = false;
635 } else if (last_entry_data_size == authsize) {
636 /* ICV placed in whole last entry (not fragmented!) */
638 *is_icv_fragmented = false;
639 } else if (icv_max_size > icv_required_size) {
641 *is_icv_fragmented = true;
642 } else if (icv_max_size == icv_required_size) {
644 *is_icv_fragmented = true;
646 dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
647 MAX_ICV_NENTS_SUPPORTED);
648 nents = -1; /*unsupported*/
650 dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
651 (*is_icv_fragmented ? "true" : "false"), nents);
656 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
657 struct aead_request *req,
658 struct buffer_array *sg_data,
659 bool is_last, bool do_chain)
661 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
662 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
663 struct device *dev = drvdata_to_dev(drvdata);
667 areq_ctx->gen_ctx.iv_dma_addr = 0;
671 areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
674 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
675 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
676 hw_iv_size, req->iv);
681 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
682 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
683 // TODO: what about CTR?? ask Ron
684 if (do_chain && areq_ctx->plaintext_authenticate_only) {
685 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
686 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
687 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
688 /* Chain to given list */
689 cc_add_buffer_entry(dev, sg_data,
690 (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
691 iv_size_to_authenc, is_last,
692 &areq_ctx->assoc.mlli_nents);
693 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
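
/*
 * Account for the associated data of an AEAD request: count how many of
 * the (already mapped) source entries belong to the assoc data and either
 * leave it as DLLI or add it to sg_data so it becomes part of the MLLI
 * table. CCM reserves one extra entry for its header configuration block.
 */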
700 static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
701 struct aead_request *req,
702 struct buffer_array *sg_data,
703 bool is_last, bool do_chain)
705 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
707 u32 mapped_nents = 0;
708 struct scatterlist *current_sg = req->src;
709 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
710 unsigned int sg_index = 0;
711 u32 size_of_assoc = req->assoclen;
712 struct device *dev = drvdata_to_dev(drvdata);
714 if (areq_ctx->is_gcm4543)
715 size_of_assoc += crypto_aead_ivsize(tfm);
719 goto chain_assoc_exit;
722 if (req->assoclen == 0) {
723 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
724 areq_ctx->assoc.nents = 0;
725 areq_ctx->assoc.mlli_nents = 0;
726 dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
727 cc_dma_buf_type(areq_ctx->assoc_buff_type),
728 areq_ctx->assoc.nents);
729 goto chain_assoc_exit;
	//iterate over the sgl to see how many entries are for associated data
	//it is assumed that if we reach here, the sgl is already mapped
734 sg_index = current_sg->length;
735 //the first entry in the scatter list contains all the associated data
736 if (sg_index > size_of_assoc) {
739 while (sg_index <= size_of_assoc) {
740 current_sg = sg_next(current_sg);
			/* if we have reached the end of the sgl, then this is
			 * unexpected
			 */
745 dev_err(dev, "reached end of sg list. unexpected\n");
748 sg_index += current_sg->length;
752 if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
753 dev_err(dev, "Too many fragments. current %d max %d\n",
754 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
757 areq_ctx->assoc.nents = mapped_nents;
	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
762 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
763 if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
764 dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
765 (areq_ctx->assoc.nents + 1),
766 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
768 goto chain_assoc_exit;
772 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
773 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
775 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
777 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
778 dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
779 cc_dma_buf_type(areq_ctx->assoc_buff_type),
780 areq_ctx->assoc.nents);
781 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
782 req->assoclen, 0, is_last,
783 &areq_ctx->assoc.mlli_nents);
784 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
791 static void cc_prepare_aead_data_dlli(struct aead_request *req,
792 u32 *src_last_bytes, u32 *dst_last_bytes)
794 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
795 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
796 unsigned int authsize = areq_ctx->req_authsize;
798 areq_ctx->is_icv_fragmented = false;
799 if (req->src == req->dst) {
801 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
802 (*src_last_bytes - authsize);
803 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
804 (*src_last_bytes - authsize);
805 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
806 /*NON-INPLACE and DECRYPT*/
807 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
808 (*src_last_bytes - authsize);
809 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
810 (*src_last_bytes - authsize);
812 /*NON-INPLACE and ENCRYPT*/
813 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
814 (*dst_last_bytes - authsize);
815 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
816 (*dst_last_bytes - authsize);
820 static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
821 struct aead_request *req,
822 struct buffer_array *sg_data,
823 u32 *src_last_bytes, u32 *dst_last_bytes,
826 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
827 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
828 unsigned int authsize = areq_ctx->req_authsize;
829 int rc = 0, icv_nents;
830 struct device *dev = drvdata_to_dev(drvdata);
831 struct scatterlist *sg;
833 if (req->src == req->dst) {
835 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
836 areq_ctx->src_sgl, areq_ctx->cryptlen,
837 areq_ctx->src_offset, is_last_table,
838 &areq_ctx->src.mlli_nents);
840 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
842 authsize, *src_last_bytes,
843 &areq_ctx->is_icv_fragmented);
846 goto prepare_data_mlli_exit;
849 if (areq_ctx->is_icv_fragmented) {
850 /* Backup happens only when ICV is fragmented, ICV
851 * verification is made by CPU compare in order to
852 * simplify MAC verification upon request completion
854 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * the ICV was already copied for any
				 * INPLACE-DECRYPT operation, hence
				 * we must skip the copy here.
				 */
860 if (!drvdata->coherent)
861 cc_copy_mac(dev, req, CC_SG_TO_BUF);
863 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
865 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
866 areq_ctx->icv_dma_addr =
867 areq_ctx->mac_buf_dma_addr;
869 } else { /* Contig. ICV */
870 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the sg is not contiguous. */
872 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
873 (*src_last_bytes - authsize);
874 areq_ctx->icv_virt_addr = sg_virt(sg) +
875 (*src_last_bytes - authsize);
878 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
879 /*NON-INPLACE and DECRYPT*/
880 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
881 areq_ctx->src_sgl, areq_ctx->cryptlen,
882 areq_ctx->src_offset, is_last_table,
883 &areq_ctx->src.mlli_nents);
884 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
885 areq_ctx->dst_sgl, areq_ctx->cryptlen,
886 areq_ctx->dst_offset, is_last_table,
887 &areq_ctx->dst.mlli_nents);
889 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
891 authsize, *src_last_bytes,
892 &areq_ctx->is_icv_fragmented);
895 goto prepare_data_mlli_exit;
898 /* Backup happens only when ICV is fragmented, ICV
899 * verification is made by CPU compare in order to simplify
900 * MAC verification upon request completion
902 if (areq_ctx->is_icv_fragmented) {
903 cc_copy_mac(dev, req, CC_SG_TO_BUF);
904 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
906 } else { /* Contig. ICV */
907 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the sg is not contiguous. */
909 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
910 (*src_last_bytes - authsize);
911 areq_ctx->icv_virt_addr = sg_virt(sg) +
912 (*src_last_bytes - authsize);
916 /*NON-INPLACE and ENCRYPT*/
917 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
918 areq_ctx->dst_sgl, areq_ctx->cryptlen,
919 areq_ctx->dst_offset, is_last_table,
920 &areq_ctx->dst.mlli_nents);
921 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
922 areq_ctx->src_sgl, areq_ctx->cryptlen,
923 areq_ctx->src_offset, is_last_table,
924 &areq_ctx->src.mlli_nents);
926 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
928 authsize, *dst_last_bytes,
929 &areq_ctx->is_icv_fragmented);
932 goto prepare_data_mlli_exit;
935 if (!areq_ctx->is_icv_fragmented) {
936 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
938 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
939 (*dst_last_bytes - authsize);
940 areq_ctx->icv_virt_addr = sg_virt(sg) +
941 (*dst_last_bytes - authsize);
943 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
944 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
948 prepare_data_mlli_exit:
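
/*
 * Map and chain the cipher data (and, for non-inplace requests, the
 * destination) of an AEAD request, skipping over the associated data.
 * Decides between DLLI and MLLI based on how many entries each side
 * mapped to and on whether MLLI chaining was requested by the caller.
 */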
952 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
953 struct aead_request *req,
954 struct buffer_array *sg_data,
955 bool is_last_table, bool do_chain)
957 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
958 struct device *dev = drvdata_to_dev(drvdata);
959 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
960 unsigned int authsize = areq_ctx->req_authsize;
961 unsigned int src_last_bytes = 0, dst_last_bytes = 0;
963 u32 src_mapped_nents = 0, dst_mapped_nents = 0;
965 /* non-inplace mode */
966 unsigned int size_for_map = req->assoclen + req->cryptlen;
967 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
969 bool is_gcm4543 = areq_ctx->is_gcm4543;
970 u32 size_to_skip = req->assoclen;
973 size_to_skip += crypto_aead_ivsize(tfm);
975 offset = size_to_skip;
980 areq_ctx->src_sgl = req->src;
981 areq_ctx->dst_sgl = req->dst;
984 size_for_map += crypto_aead_ivsize(tfm);
986 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
988 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
990 sg_index = areq_ctx->src_sgl->length;
991 //check where the data starts
992 while (sg_index <= size_to_skip) {
993 offset -= areq_ctx->src_sgl->length;
994 areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		//if we have reached the end of the sgl, then this is unexpected
996 if (!areq_ctx->src_sgl) {
997 dev_err(dev, "reached end of sg list. unexpected\n");
1000 sg_index += areq_ctx->src_sgl->length;
1003 if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1004 dev_err(dev, "Too many fragments. current %d max %d\n",
1005 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1009 areq_ctx->src.nents = src_mapped_nents;
1011 areq_ctx->src_offset = offset;
1013 if (req->src != req->dst) {
1014 size_for_map = req->assoclen + req->cryptlen;
1015 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1018 size_for_map += crypto_aead_ivsize(tfm);
1020 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1021 &areq_ctx->dst.nents,
1022 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1025 goto chain_data_exit;
1028 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1030 sg_index = areq_ctx->dst_sgl->length;
1031 offset = size_to_skip;
1033 //check where the data starts
1034 while (sg_index <= size_to_skip) {
1035 offset -= areq_ctx->dst_sgl->length;
1036 areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
			//if we have reached the end of the sgl, then this is unexpected
1038 if (!areq_ctx->dst_sgl) {
1039 dev_err(dev, "reached end of sg list. unexpected\n");
1042 sg_index += areq_ctx->dst_sgl->length;
1045 if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1046 dev_err(dev, "Too many fragments. current %d max %d\n",
1047 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1050 areq_ctx->dst.nents = dst_mapped_nents;
1051 areq_ctx->dst_offset = offset;
1052 if (src_mapped_nents > 1 ||
1053 dst_mapped_nents > 1 ||
1055 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1056 rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1058 &dst_last_bytes, is_last_table);
1060 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1061 cc_prepare_aead_data_dlli(req, &src_last_bytes,
1069 static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1070 struct aead_request *req)
1072 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1073 u32 curr_mlli_size = 0;
1075 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1076 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1077 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1078 LLI_ENTRY_BYTE_SIZE;
1081 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1082 /*Inplace case dst nents equal to src nents*/
1083 if (req->src == req->dst) {
1084 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1085 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1087 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1088 if (!areq_ctx->is_single_pass)
1089 areq_ctx->assoc.mlli_nents +=
1090 areq_ctx->src.mlli_nents;
1092 if (areq_ctx->gen_ctx.op_type ==
1093 DRV_CRYPTO_DIRECTION_DECRYPT) {
1094 areq_ctx->src.sram_addr =
1095 drvdata->mlli_sram_addr +
1097 areq_ctx->dst.sram_addr =
1098 areq_ctx->src.sram_addr +
1099 areq_ctx->src.mlli_nents *
1100 LLI_ENTRY_BYTE_SIZE;
1101 if (!areq_ctx->is_single_pass)
1102 areq_ctx->assoc.mlli_nents +=
1103 areq_ctx->src.mlli_nents;
1105 areq_ctx->dst.sram_addr =
1106 drvdata->mlli_sram_addr +
1108 areq_ctx->src.sram_addr =
1109 areq_ctx->dst.sram_addr +
1110 areq_ctx->dst.mlli_nents *
1111 LLI_ENTRY_BYTE_SIZE;
1112 if (!areq_ctx->is_single_pass)
1113 areq_ctx->assoc.mlli_nents +=
1114 areq_ctx->dst.mlli_nents;
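
/*
 * Top-level DMA mapping for an AEAD request: maps the MAC buffer, the
 * CCM/GCM configuration blocks and the IV, maps req->src/req->dst, then
 * chains assoc, IV and data either in a single pass or in the double-pass
 * layout described below, generating the MLLI table when needed.
 */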
1120 int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1122 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1123 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1124 struct device *dev = drvdata_to_dev(drvdata);
1125 struct buffer_array sg_data;
1126 unsigned int authsize = areq_ctx->req_authsize;
1127 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1129 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1130 bool is_gcm4543 = areq_ctx->is_gcm4543;
1131 dma_addr_t dma_addr;
1132 u32 mapped_nents = 0;
1133 u32 dummy = 0; /*used for the assoc data fragments */
1134 u32 size_to_map = 0;
1135 gfp_t flags = cc_gfp_flags(&req->base);
1137 mlli_params->curr_pool = NULL;
1138 sg_data.num_of_buffers = 0;
	/* copy mac to a temporary location to deal with possible
	 * data memory overwrite that may be caused by a cache coherence
	 * problem.
	 */
1143 if (drvdata->coherent &&
1144 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1145 req->src == req->dst)
1146 cc_copy_mac(dev, req, CC_SG_TO_BUF);
	/* calculate the size for the cipher; remove the ICV in decrypt */
1149 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1150 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1152 (req->cryptlen - authsize);
1154 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1156 if (dma_mapping_error(dev, dma_addr)) {
1157 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1158 MAX_MAC_SIZE, areq_ctx->mac_buf);
1160 goto aead_map_failure;
1162 areq_ctx->mac_buf_dma_addr = dma_addr;
1164 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1165 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1167 dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1170 if (dma_mapping_error(dev, dma_addr)) {
1171 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1172 AES_BLOCK_SIZE, addr);
1173 areq_ctx->ccm_iv0_dma_addr = 0;
1175 goto aead_map_failure;
1177 areq_ctx->ccm_iv0_dma_addr = dma_addr;
1179 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1180 &sg_data, req->assoclen);
1182 goto aead_map_failure;
1185 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1186 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1188 if (dma_mapping_error(dev, dma_addr)) {
1189 dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1190 AES_BLOCK_SIZE, areq_ctx->hkey);
1192 goto aead_map_failure;
1194 areq_ctx->hkey_dma_addr = dma_addr;
1196 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1197 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1198 if (dma_mapping_error(dev, dma_addr)) {
1199 dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1200 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1202 goto aead_map_failure;
1204 areq_ctx->gcm_block_len_dma_addr = dma_addr;
1206 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1207 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1209 if (dma_mapping_error(dev, dma_addr)) {
1210 dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1211 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1212 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1214 goto aead_map_failure;
1216 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1218 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1219 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1221 if (dma_mapping_error(dev, dma_addr)) {
1222 dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1223 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1224 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1226 goto aead_map_failure;
1228 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1231 size_to_map = req->cryptlen + req->assoclen;
1232 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1233 size_to_map += authsize;
1236 size_to_map += crypto_aead_ivsize(tfm);
1237 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1238 &areq_ctx->src.nents,
1239 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1240 LLI_MAX_NUM_OF_DATA_ENTRIES),
1241 &dummy, &mapped_nents);
1243 goto aead_map_failure;
1245 if (areq_ctx->is_single_pass) {
1247 * Create MLLI table for:
		 *   Note: IV is a contiguous buffer (not an SGL)
		 */
1252 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1254 goto aead_map_failure;
1255 rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1257 goto aead_map_failure;
1258 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1260 goto aead_map_failure;
1261 } else { /* DOUBLE-PASS flow */
1263 * Prepare MLLI table(s) in this order:
1265 * If ENCRYPT/DECRYPT (inplace):
1266 * (1) MLLI table for assoc
1267 * (2) IV entry (chained right after end of assoc)
1268 * (3) MLLI for src/dst (inplace operation)
1270 * If ENCRYPT (non-inplace)
1271 * (1) MLLI table for assoc
1272 * (2) IV entry (chained right after end of assoc)
1276 * If DECRYPT (non-inplace)
1277 * (1) MLLI table for assoc
1278 * (2) IV entry (chained right after end of assoc)
1282 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1284 goto aead_map_failure;
1285 rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1287 goto aead_map_failure;
1288 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1290 goto aead_map_failure;
	/* MLLI support - start building the MLLI according to the
	 * above results.
	 */
1296 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1297 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1298 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1299 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1301 goto aead_map_failure;
1303 cc_update_aead_mlli_nents(drvdata, req);
1304 dev_dbg(dev, "assoc params mn %d\n",
1305 areq_ctx->assoc.mlli_nents);
1306 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1307 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1312 cc_unmap_aead_request(dev, req);
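
/*
 * Map the data for a final/finup hash operation: the internal buffer that
 * holds previously accumulated bytes plus the caller's scatterlist. Small
 * single-entry sources stay DLLI; anything else is gathered into an MLLI
 * table.
 */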
1316 int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1317 struct scatterlist *src, unsigned int nbytes,
1318 bool do_update, gfp_t flags)
1320 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1321 struct device *dev = drvdata_to_dev(drvdata);
1322 u8 *curr_buff = cc_hash_buf(areq_ctx);
1323 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1324 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1325 struct buffer_array sg_data;
1326 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1329 u32 mapped_nents = 0;
1331 dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1332 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1333 /* Init the type of the dma buffer */
1334 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1335 mlli_params->curr_pool = NULL;
1336 sg_data.num_of_buffers = 0;
1337 areq_ctx->in_nents = 0;
1339 if (nbytes == 0 && *curr_buff_cnt == 0) {
	/* TODO: copy the data in case the buffer is big enough for the operation */
1345 /* map the previous buffer */
1346 if (*curr_buff_cnt) {
1347 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1353 if (src && nbytes > 0 && do_update) {
1354 rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1355 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1356 &dummy, &mapped_nents);
1358 goto unmap_curr_buff;
1359 if (src && mapped_nents == 1 &&
1360 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1361 memcpy(areq_ctx->buff_sg, src,
1362 sizeof(struct scatterlist));
1363 areq_ctx->buff_sg->length = nbytes;
1364 areq_ctx->curr_sg = areq_ctx->buff_sg;
1365 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1367 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1372 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1373 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1374 /* add the src data to the sg_data */
1375 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1376 0, true, &areq_ctx->mlli_nents);
1377 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1379 goto fail_unmap_din;
1381 /* change the buffer index for the unmap function */
1382 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1383 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1384 cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1388 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1392 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
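
/*
 * Map the data for a hash update: anything below one block is simply
 * copied into the context buffer, otherwise the residue (the bytes beyond
 * the last full block) is saved into the "next" buffer and the remaining
 * full blocks are mapped for DMA, as DLLI or MLLI as appropriate.
 */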
1397 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1398 struct scatterlist *src, unsigned int nbytes,
1399 unsigned int block_size, gfp_t flags)
1401 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1402 struct device *dev = drvdata_to_dev(drvdata);
1403 u8 *curr_buff = cc_hash_buf(areq_ctx);
1404 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1405 u8 *next_buff = cc_next_buf(areq_ctx);
1406 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1407 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1408 unsigned int update_data_len;
1409 u32 total_in_len = nbytes + *curr_buff_cnt;
1410 struct buffer_array sg_data;
1411 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1412 unsigned int swap_index = 0;
1415 u32 mapped_nents = 0;
1417 dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1418 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1419 /* Init the type of the dma buffer */
1420 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1421 mlli_params->curr_pool = NULL;
1422 areq_ctx->curr_sg = NULL;
1423 sg_data.num_of_buffers = 0;
1424 areq_ctx->in_nents = 0;
1426 if (total_in_len < block_size) {
1427 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1428 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1429 areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1430 sg_copy_to_buffer(src, areq_ctx->in_nents,
1431 &curr_buff[*curr_buff_cnt], nbytes);
1432 *curr_buff_cnt += nbytes;
1436 /* Calculate the residue size*/
1437 *next_buff_cnt = total_in_len & (block_size - 1);
1438 /* update data len */
1439 update_data_len = total_in_len - *next_buff_cnt;
1441 dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1442 *next_buff_cnt, update_data_len);
1444 /* Copy the new residue to next buffer */
1445 if (*next_buff_cnt) {
1446 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1447 next_buff, (update_data_len - *curr_buff_cnt),
1449 cc_copy_sg_portion(dev, next_buff, src,
1450 (update_data_len - *curr_buff_cnt),
1451 nbytes, CC_SG_TO_BUF);
1452 /* change the buffer index for next operation */
1456 if (*curr_buff_cnt) {
1457 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1461 /* change the buffer index for next operation */
1465 if (update_data_len > *curr_buff_cnt) {
1466 rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1467 DMA_TO_DEVICE, &areq_ctx->in_nents,
1468 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1471 goto unmap_curr_buff;
1472 if (mapped_nents == 1 &&
1473 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1474 /* only one entry in the SG and no previous data */
1475 memcpy(areq_ctx->buff_sg, src,
1476 sizeof(struct scatterlist));
1477 areq_ctx->buff_sg->length = update_data_len;
1478 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1479 areq_ctx->curr_sg = areq_ctx->buff_sg;
1481 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1485 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1486 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1487 /* add the src data to the sg_data */
1488 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1489 (update_data_len - *curr_buff_cnt), 0, true,
1490 &areq_ctx->mlli_nents);
1491 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1493 goto fail_unmap_din;
1495 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1500 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1504 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
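
/*
 * Release everything cc_map_hash_request_final()/_update() set up: the
 * MLLI table (if any), the source scatterlist mapping and the internal
 * buffer mapping. When do_revert is set, the buffer index is restored so
 * a failed update can be retried.
 */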
1509 void cc_unmap_hash_request(struct device *dev, void *ctx,
1510 struct scatterlist *src, bool do_revert)
1512 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1513 u32 *prev_len = cc_next_buf_cnt(areq_ctx);
	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
1518 if (areq_ctx->mlli_params.curr_pool) {
1519 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1520 &areq_ctx->mlli_params.mlli_dma_addr,
1521 areq_ctx->mlli_params.mlli_virt_addr);
1522 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1523 areq_ctx->mlli_params.mlli_virt_addr,
1524 areq_ctx->mlli_params.mlli_dma_addr);
1527 if (src && areq_ctx->in_nents) {
1528 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1529 sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1530 dma_unmap_sg(dev, src,
1531 areq_ctx->in_nents, DMA_TO_DEVICE);
1535 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1536 sg_virt(areq_ctx->buff_sg),
1537 &sg_dma_address(areq_ctx->buff_sg),
1538 sg_dma_len(areq_ctx->buff_sg));
1539 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1541 /* clean the previous data length for update
1546 areq_ctx->buff_index ^= 1;
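
/*
 * Allocate the buffer manager handle and create the DMA pool from which
 * MLLI tables are allocated; cc_buffer_mgr_fini() releases both.
 */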
1551 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1553 struct buff_mgr_handle *buff_mgr_handle;
1554 struct device *dev = drvdata_to_dev(drvdata);
1556 buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1557 if (!buff_mgr_handle)
1560 drvdata->buff_mgr_handle = buff_mgr_handle;
1562 buff_mgr_handle->mlli_buffs_pool =
1563 dma_pool_create("dx_single_mlli_tables", dev,
1564 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1565 LLI_ENTRY_BYTE_SIZE,
1566 MLLI_TABLE_MIN_ALIGNMENT, 0);
1568 if (!buff_mgr_handle->mlli_buffs_pool)
1574 cc_buffer_mgr_fini(drvdata);
1578 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1580 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1582 if (buff_mgr_handle) {
1583 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1584 kfree(drvdata->buff_mgr_handle);
1585 drvdata->buff_mgr_handle = NULL;