// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
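/*
 * Usage note (illustrative, assuming the module is built as crypto_safexcel):
 * the ring count can be capped at load time, e.g.
 *
 *   modprobe crypto_safexcel max_rings=2
 *
 * safexcel_configure() below clamps the value further to the number of
 * rings and ring AICs actually present in the hardware.
 */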
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}
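
/*
 * Probe the size of a physically attached RAM by binary subdivision:
 * write a unique marker at the midpoint of the remaining search range,
 * overwrite every possible power-of-two alias below it with the inverted
 * marker, then read the midpoint back. If the marker survived, the RAM
 * really extends past the midpoint and the search continues in the top
 * half; if an alias clobbered it, the address wrapped around and the
 * search continues in the bottom half. The loop below maintains the
 * invariant that addrlo is always backed by real RAM while addrhi is not.
 */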
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * Probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}
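
/*
 * Worked example of the sizing steps in eip197_trc_cache_init() below,
 * using illustrative probe results (not taken from any real device):
 * dsize = 32768 bytes and asize = 1024 admin words, with a record size
 * of 64 data words (assuming EIP197_CS_TRC_REC_WC = 64), give
 *   Step #1: (32768 >> 2) / 64     = 128 records fit physically
 *   Step #2: min(128, 1024 >> 1)   = 128 records (admin RAM suffices)
 *   Step #3: __fls(1024 - 128) - 2 = 7, i.e. hash table size log2 = 7
 *   Step #4: 16 << 7               = 2048 dwords of hash table
 *   Step #5: min(128, 1024 - 512)  = 128 records, unchanged here
 */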
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}
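
/*
 * Prepare the ICE (Input Classification Engine) for firmware download:
 * the token FIFOs are configured, the scratchpad RAM is cleared, both
 * microengines (IFPP and IPUE) are held in reset with their ECC state
 * cleared, and the IFPP program memory is made host-accessible.
 */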
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int i, pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}
}
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const u32 *data = (const u32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}
/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
				      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}
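
/*
 * Firmware download sequence: request ifpp.bin/ipue.bin for the detected
 * engine variant, write both images to program memory, then start the
 * microengines. If the production firmware set is missing or fails to
 * come up, retry once with the eip197_minifw set, which runs the engine
 * in backward-compatible BCLA mode.
 */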
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
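
/*
 * Worked example of the FIFO fetch-count math used above and for the
 * result descriptors below, with illustrative values (not from a real
 * configuration): cd_size = 10 words and hwdataw = 2 (4-word bus) give
 * cd_size_rnd = (10 + 3) >> 2 = 3 bus words per descriptor. With
 * hwcfsize = 5 the FIFO holds 1 << 5 = 32 bus words, so up to
 * 32 / 3 = 10 descriptors fit in one block, capped at
 * pes * EIP197_FETCH_DEPTH on the EIP197 or reduced by one on the EIP97.
 */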
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
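
/*
 * The threshold write above coalesces completion interrupts: the RDR only
 * raises its interrupt once 'coal' packets have been processed (capped at
 * EIP197_MAX_BATCH_SZ), instead of interrupting once per packet.
 */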
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
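
/*
 * Note on the two PREP_COUNT writes above: the engine counts prepared
 * descriptors in bytes, so the number of newly prepared command/result
 * descriptors is scaled by cd_offset/rd_offset (the per-descriptor byte
 * stride computed in safexcel_configure()) before being handed to the
 * CDR/RDR.
 */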
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);
		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EIO;
}
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}
void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

	priv->config.pes = priv->hwconfig.hwnumpes;
	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
	/* Cannot currently support more rings than we have ring AICs! */
	priv->config.rings = min_t(u32, priv->config.rings,
				   priv->hwconfig.hwnumraic);

	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	/* res token is behind the descr, but ofs must be rounded to buswdth */
	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descr is this 1st part plus the result struct */
	priv->config.rd_size = priv->config.res_offset +
			       EIP197_RD64_RESULT_SIZE;
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

	/* convert dwords to bytes */
	priv->config.cd_offset *= sizeof(u32);
	priv->config.rd_offset *= sizeof(u32);
	priv->config.res_offset *= sizeof(u32);
}
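
/*
 * Layout example for the math above, using an illustrative hwdataw of 2
 * (i.e. a 4-dword bus, so mask = 3): an illustrative 10-dword command
 * descriptor is padded to a 12-dword slot ((10 + 3) & ~3), which becomes
 * cd_offset, and the result token starts at the first bus-aligned offset
 * behind the result descriptor (res_offset). All three offsets are
 * converted from dwords to bytes at the end.
 */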
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		offsets->global		= EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		offsets->global		= EIP97_GLOBAL_BASE;
	}
}
/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 *
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt, hwopt, peopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0;  /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report an appropriate error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1)
			priv->flags |= EIP197_ICE;
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}

	/* Scan for ring AIC's */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
	priv->hwconfig.hwnumraic = i;
	/* Low-end EIP196 may not have any ring AIC's ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}
static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};
static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}
static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}
static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name		= "crypto-safexcel",
	.id_table	= safexcel_pci_ids,
	.probe		= safexcel_pci_probe,
	.remove		= safexcel_pci_remove,
};
static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}

static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);
MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");