drivers/crypto/talitos.c
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
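/*
 * h/w descriptor pointer helpers: SEC1 pointers carry a 32-bit address and a
 * 16-bit length (len1); SEC2+ pointers additionally carry an extent byte
 * (j_extent) and an eptr field holding the upper address bits used for
 * 36-bit addressing.
 */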
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            unsigned int len, bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (is_sec1) {
63                 ptr->len1 = cpu_to_be16(len);
64         } else {
65                 ptr->len = cpu_to_be16(len);
66                 ptr->eptr = upper_32_bits(dma_addr);
67         }
68 }
69
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71                              struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73         dst_ptr->ptr = src_ptr->ptr;
74         if (is_sec1) {
75                 dst_ptr->len1 = src_ptr->len1;
76         } else {
77                 dst_ptr->len = src_ptr->len;
78                 dst_ptr->eptr = src_ptr->eptr;
79         }
80 }
81
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83                                            bool is_sec1)
84 {
85         if (is_sec1)
86                 return be16_to_cpu(ptr->len1);
87         else
88                 return be16_to_cpu(ptr->len);
89 }
90
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92                                    bool is_sec1)
93 {
94         if (!is_sec1)
95                 ptr->j_extent = val;
96 }
97
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100         if (!is_sec1)
101                 ptr->j_extent |= val;
102 }
103
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void map_single_talitos_ptr(struct device *dev,
108                                    struct talitos_ptr *ptr,
109                                    unsigned int len, void *data,
110                                    enum dma_data_direction dir)
111 {
112         dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
113         struct talitos_private *priv = dev_get_drvdata(dev);
114         bool is_sec1 = has_ftr_sec1(priv);
115
116         to_talitos_ptr(ptr, dma_addr, len, is_sec1);
117 }
118
119 /*
120  * unmap bus single (contiguous) h/w descriptor pointer
121  */
122 static void unmap_single_talitos_ptr(struct device *dev,
123                                      struct talitos_ptr *ptr,
124                                      enum dma_data_direction dir)
125 {
126         struct talitos_private *priv = dev_get_drvdata(dev);
127         bool is_sec1 = has_ftr_sec1(priv);
128
129         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
130                          from_talitos_ptr_len(ptr, is_sec1), dir);
131 }
132
133 static int reset_channel(struct device *dev, int ch)
134 {
135         struct talitos_private *priv = dev_get_drvdata(dev);
136         unsigned int timeout = TALITOS_TIMEOUT;
137         bool is_sec1 = has_ftr_sec1(priv);
138
139         if (is_sec1) {
140                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
141                           TALITOS1_CCCR_LO_RESET);
142
143                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
144                         TALITOS1_CCCR_LO_RESET) && --timeout)
145                         cpu_relax();
146         } else {
147                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
148                           TALITOS2_CCCR_RESET);
149
150                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
151                         TALITOS2_CCCR_RESET) && --timeout)
152                         cpu_relax();
153         }
154
155         if (timeout == 0) {
156                 dev_err(dev, "failed to reset channel %d\n", ch);
157                 return -EIO;
158         }
159
160         /* set 36-bit addressing, done writeback enable and done IRQ enable */
161         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
162                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
163         /* enable chaining descriptors */
164         if (is_sec1)
165                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
166                           TALITOS_CCCR_LO_NE);
167
168         /* and ICCR writeback, if available */
169         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
170                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171                           TALITOS_CCCR_LO_IWSE);
172
173         return 0;
174 }
175
176 static int reset_device(struct device *dev)
177 {
178         struct talitos_private *priv = dev_get_drvdata(dev);
179         unsigned int timeout = TALITOS_TIMEOUT;
180         bool is_sec1 = has_ftr_sec1(priv);
181         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
182
183         setbits32(priv->reg + TALITOS_MCR, mcr);
184
185         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
186                && --timeout)
187                 cpu_relax();
188
189         if (priv->irq[1]) {
190                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
191                 setbits32(priv->reg + TALITOS_MCR, mcr);
192         }
193
194         if (timeout == 0) {
195                 dev_err(dev, "failed to reset device\n");
196                 return -EIO;
197         }
198
199         return 0;
200 }
201
202 /*
203  * Reset and initialize the device
204  */
205 static int init_device(struct device *dev)
206 {
207         struct talitos_private *priv = dev_get_drvdata(dev);
208         int ch, err;
209         bool is_sec1 = has_ftr_sec1(priv);
210
211         /*
212          * Master reset
213          * errata documentation: warning: certain SEC interrupts
214          * are not fully cleared by writing the MCR:SWR bit,
215          * set bit twice to completely reset
216          */
217         err = reset_device(dev);
218         if (err)
219                 return err;
220
221         err = reset_device(dev);
222         if (err)
223                 return err;
224
225         /* reset channels */
226         for (ch = 0; ch < priv->num_channels; ch++) {
227                 err = reset_channel(dev, ch);
228                 if (err)
229                         return err;
230         }
231
232         /* enable channel done and error interrupts */
233         if (is_sec1) {
234                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
235                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
236                 /* disable parity error check in DEU (erroneous? test vect.) */
237                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
238         } else {
239                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
240                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
241         }
242
243         /* disable integrity check error interrupts (use writeback instead) */
244         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
245                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
246                           TALITOS_MDEUICR_LO_ICE);
247
248         return 0;
249 }
250
251 /**
252  * talitos_submit - submits a descriptor to the device for processing
253  * @dev:        the SEC device to be used
254  * @ch:         the SEC device channel to be used
255  * @desc:       the descriptor to be processed by the device
256  * @callback:   whom to call when processing is complete
257  * @context:    a handle for use by caller (optional)
258  *
259  * desc must contain valid dma-mapped (bus physical) address pointers.
260  * callback must check err and feedback in descriptor header
261  * for device processing status.
262  */
263 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
264                    void (*callback)(struct device *dev,
265                                     struct talitos_desc *desc,
266                                     void *context, int error),
267                    void *context)
268 {
269         struct talitos_private *priv = dev_get_drvdata(dev);
270         struct talitos_request *request;
271         unsigned long flags;
272         int head;
273         bool is_sec1 = has_ftr_sec1(priv);
274
275         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
276
277         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
278                 /* h/w fifo is full */
279                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
280                 return -EAGAIN;
281         }
282
283         head = priv->chan[ch].head;
284         request = &priv->chan[ch].fifo[head];
285
286         /* map descriptor and save caller data */
287         if (is_sec1) {
288                 desc->hdr1 = desc->hdr;
289                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
290                                                    TALITOS_DESC_SIZE,
291                                                    DMA_BIDIRECTIONAL);
292         } else {
293                 request->dma_desc = dma_map_single(dev, desc,
294                                                    TALITOS_DESC_SIZE,
295                                                    DMA_BIDIRECTIONAL);
296         }
297         request->callback = callback;
298         request->context = context;
299
300         /* increment fifo head */
301         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
302
303         smp_wmb();
304         request->desc = desc;
305
306         /* GO! */
307         wmb();
308         out_be32(priv->chan[ch].reg + TALITOS_FF,
309                  upper_32_bits(request->dma_desc));
310         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
311                  lower_32_bits(request->dma_desc));
312
313         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
314
315         return -EINPROGRESS;
316 }
317 EXPORT_SYMBOL(talitos_submit);
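/*
 * Illustrative caller sketch (not part of the driver); my_done() and
 * my_cleanup() are hypothetical.  A submitter treats -EINPROGRESS as success
 * and anything else (e.g. -EAGAIN when the channel fifo is full) as a request
 * to unmap and bail out, as ipsec_esp() does below:
 *
 *	err = talitos_submit(dev, ch, desc, my_done, req);
 *	if (err != -EINPROGRESS)
 *		my_cleanup(dev, desc, req);	 hypothetical unmap/free
 *	return err;
 */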
318
319 /*
320  * process what was done, notify callback of error if not
321  */
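/*
 * Synchronization with talitos_submit(): the submit path holds head_lock and
 * publishes request->desc (after smp_wmb()) as the "slot in flight" marker;
 * this completion path holds tail_lock, clears request->desc to release the
 * slot, and drops the lock before invoking the caller's callback.  The
 * per-channel submit_count limits outstanding requests and is decremented
 * here once a slot is reclaimed.
 */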
322 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
323 {
324         struct talitos_private *priv = dev_get_drvdata(dev);
325         struct talitos_request *request, saved_req;
326         unsigned long flags;
327         int tail, status;
328         bool is_sec1 = has_ftr_sec1(priv);
329
330         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
331
332         tail = priv->chan[ch].tail;
333         while (priv->chan[ch].fifo[tail].desc) {
334                 __be32 hdr;
335
336                 request = &priv->chan[ch].fifo[tail];
337
338                 /* descriptors with their done bits set don't get the error */
339                 rmb();
340                 if (!is_sec1)
341                         hdr = request->desc->hdr;
342                 else if (request->desc->next_desc)
343                         hdr = (request->desc + 1)->hdr1;
344                 else
345                         hdr = request->desc->hdr1;
346
347                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
348                         status = 0;
349                 else
350                         if (!error)
351                                 break;
352                         else
353                                 status = error;
354
355                 dma_unmap_single(dev, request->dma_desc,
356                                  TALITOS_DESC_SIZE,
357                                  DMA_BIDIRECTIONAL);
358
359                 /* copy entries so we can call callback outside lock */
360                 saved_req.desc = request->desc;
361                 saved_req.callback = request->callback;
362                 saved_req.context = request->context;
363
364                 /* release request entry in fifo */
365                 smp_wmb();
366                 request->desc = NULL;
367
368                 /* increment fifo tail */
369                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
370
371                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
372
373                 atomic_dec(&priv->chan[ch].submit_count);
374
375                 saved_req.callback(dev, saved_req.desc, saved_req.context,
376                                    status);
377                 /* channel may resume processing in single desc error case */
378                 if (error && !reset_ch && status == error)
379                         return;
380                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
381                 tail = priv->chan[ch].tail;
382         }
383
384         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
385 }
386
387 /*
388  * process completed requests for channels that have done status
389  */
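/*
 * The interrupt handlers further below mask channel-done interrupts and
 * schedule these tasklets; each tasklet drains the completed channels via
 * flush_channel() and then re-enables the done interrupts.  Note the IMR
 * polarity difference: on SEC1 a set IMR bit masks the interrupt (hence
 * clrbits32() to unmask), while on SEC2+ a set bit enables it (hence
 * setbits32()).
 */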
390 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
391 static void talitos1_done_##name(unsigned long data)                    \
392 {                                                                       \
393         struct device *dev = (struct device *)data;                     \
394         struct talitos_private *priv = dev_get_drvdata(dev);            \
395         unsigned long flags;                                            \
396                                                                         \
397         if (ch_done_mask & 0x10000000)                                  \
398                 flush_channel(dev, 0, 0, 0);                    \
399         if (ch_done_mask & 0x40000000)                                  \
400                 flush_channel(dev, 1, 0, 0);                    \
401         if (ch_done_mask & 0x00010000)                                  \
402                 flush_channel(dev, 2, 0, 0);                    \
403         if (ch_done_mask & 0x00040000)                                  \
404                 flush_channel(dev, 3, 0, 0);                    \
405                                                                         \
406         /* At this point, all completed channels have been processed */ \
407         /* Unmask done interrupts for channels completed later on. */   \
408         spin_lock_irqsave(&priv->reg_lock, flags);                      \
409         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
410         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
411         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
412 }
413
414 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
416
417 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
418 static void talitos2_done_##name(unsigned long data)                    \
419 {                                                                       \
420         struct device *dev = (struct device *)data;                     \
421         struct talitos_private *priv = dev_get_drvdata(dev);            \
422         unsigned long flags;                                            \
423                                                                         \
424         if (ch_done_mask & 1)                                           \
425                 flush_channel(dev, 0, 0, 0);                            \
426         if (ch_done_mask & (1 << 2))                                    \
427                 flush_channel(dev, 1, 0, 0);                            \
428         if (ch_done_mask & (1 << 4))                                    \
429                 flush_channel(dev, 2, 0, 0);                            \
430         if (ch_done_mask & (1 << 6))                                    \
431                 flush_channel(dev, 3, 0, 0);                            \
432                                                                         \
433         /* At this point, all completed channels have been processed */ \
434         /* Unmask done interrupts for channels completed later on. */   \
435         spin_lock_irqsave(&priv->reg_lock, flags);                      \
436         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
437         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
438         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
439 }
440
441 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
442 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
443 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
444 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
445
446 /*
447  * locate current (offending) descriptor
448  */
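/*
 * The channel's current descriptor pointer registers (CDPR/CDPR_LO) are
 * matched against each fifo entry's dma_desc, starting at the tail; chained
 * descriptors are also matched through next_desc, in which case the header
 * of the chained (second) descriptor is returned.
 */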
449 static u32 current_desc_hdr(struct device *dev, int ch)
450 {
451         struct talitos_private *priv = dev_get_drvdata(dev);
452         int tail, iter;
453         dma_addr_t cur_desc;
454
455         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
456         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
457
458         if (!cur_desc) {
459                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
460                 return 0;
461         }
462
463         tail = priv->chan[ch].tail;
464
465         iter = tail;
466         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
467                priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
468                 iter = (iter + 1) & (priv->fifo_len - 1);
469                 if (iter == tail) {
470                         dev_err(dev, "couldn't locate current descriptor\n");
471                         return 0;
472                 }
473         }
474
475         if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
476                 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
477
478         return priv->chan[ch].fifo[iter].desc->hdr;
479 }
480
481 /*
482  * user diagnostics; report root cause of error based on execution unit status
483  */
484 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
485 {
486         struct talitos_private *priv = dev_get_drvdata(dev);
487         int i;
488
489         if (!desc_hdr)
490                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
491
492         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
493         case DESC_HDR_SEL0_AFEU:
494                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
495                         in_be32(priv->reg_afeu + TALITOS_EUISR),
496                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
497                 break;
498         case DESC_HDR_SEL0_DEU:
499                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
500                         in_be32(priv->reg_deu + TALITOS_EUISR),
501                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
502                 break;
503         case DESC_HDR_SEL0_MDEUA:
504         case DESC_HDR_SEL0_MDEUB:
505                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
506                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
507                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
508                 break;
509         case DESC_HDR_SEL0_RNG:
510                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
511                         in_be32(priv->reg_rngu + TALITOS_ISR),
512                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
513                 break;
514         case DESC_HDR_SEL0_PKEU:
515                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
516                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
517                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
518                 break;
519         case DESC_HDR_SEL0_AESU:
520                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
521                         in_be32(priv->reg_aesu + TALITOS_EUISR),
522                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
523                 break;
524         case DESC_HDR_SEL0_CRCU:
525                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
526                         in_be32(priv->reg_crcu + TALITOS_EUISR),
527                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
528                 break;
529         case DESC_HDR_SEL0_KEU:
530                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
531                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
532                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
533                 break;
534         }
535
536         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
537         case DESC_HDR_SEL1_MDEUA:
538         case DESC_HDR_SEL1_MDEUB:
539                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
540                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
541                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
542                 break;
543         case DESC_HDR_SEL1_CRCU:
544                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
545                         in_be32(priv->reg_crcu + TALITOS_EUISR),
546                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
547                 break;
548         }
549
550         for (i = 0; i < 8; i++)
551                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
552                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
553                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
554 }
555
556 /*
557  * recover from error interrupts
558  */
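/*
 * Per-channel errors are decoded from CCPSR, the channel is flushed with the
 * error code, and the channel is then either reset or, on SEC2+, restarted
 * with TALITOS2_CCCR_CONT.  Conditions that cannot be recovered per channel
 * (or a failed restart) purge every channel and reinitialize the device.
 */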
559 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
560 {
561         struct talitos_private *priv = dev_get_drvdata(dev);
562         unsigned int timeout = TALITOS_TIMEOUT;
563         int ch, error, reset_dev = 0;
564         u32 v_lo;
565         bool is_sec1 = has_ftr_sec1(priv);
566         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
567
568         for (ch = 0; ch < priv->num_channels; ch++) {
569                 /* skip channels without errors */
570                 if (is_sec1) {
571                         /* bits 29, 31, 17, 19 */
572                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
573                                 continue;
574                 } else {
575                         if (!(isr & (1 << (ch * 2 + 1))))
576                                 continue;
577                 }
578
579                 error = -EINVAL;
580
581                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
582
583                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
584                         dev_err(dev, "double fetch fifo overflow error\n");
585                         error = -EAGAIN;
586                         reset_ch = 1;
587                 }
588                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
589                         /* h/w dropped descriptor */
590                         dev_err(dev, "single fetch fifo overflow error\n");
591                         error = -EAGAIN;
592                 }
593                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
594                         dev_err(dev, "master data transfer error\n");
595                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
596                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
597                                              : "s/g data length zero error\n");
598                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
599                         dev_err(dev, is_sec1 ? "parity error\n"
600                                              : "fetch pointer zero error\n");
601                 if (v_lo & TALITOS_CCPSR_LO_IDH)
602                         dev_err(dev, "illegal descriptor header error\n");
603                 if (v_lo & TALITOS_CCPSR_LO_IEU)
604                         dev_err(dev, is_sec1 ? "static assignment error\n"
605                                              : "invalid exec unit error\n");
606                 if (v_lo & TALITOS_CCPSR_LO_EU)
607                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
608                 if (!is_sec1) {
609                         if (v_lo & TALITOS_CCPSR_LO_GB)
610                                 dev_err(dev, "gather boundary error\n");
611                         if (v_lo & TALITOS_CCPSR_LO_GRL)
612                                 dev_err(dev, "gather return/length error\n");
613                         if (v_lo & TALITOS_CCPSR_LO_SB)
614                                 dev_err(dev, "scatter boundary error\n");
615                         if (v_lo & TALITOS_CCPSR_LO_SRL)
616                                 dev_err(dev, "scatter return/length error\n");
617                 }
618
619                 flush_channel(dev, ch, error, reset_ch);
620
621                 if (reset_ch) {
622                         reset_channel(dev, ch);
623                 } else {
624                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
625                                   TALITOS2_CCCR_CONT);
626                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
627                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
628                                TALITOS2_CCCR_CONT) && --timeout)
629                                 cpu_relax();
630                         if (timeout == 0) {
631                                 dev_err(dev, "failed to restart channel %d\n",
632                                         ch);
633                                 reset_dev = 1;
634                         }
635                 }
636         }
637         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
638             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
639                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
640                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
641                                 isr, isr_lo);
642                 else
643                         dev_err(dev, "done overflow, internal time out, or "
644                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
645
646                 /* purge request queues */
647                 for (ch = 0; ch < priv->num_channels; ch++)
648                         flush_channel(dev, ch, -EIO, 1);
649
650                 /* reset and reinitialize the device */
651                 init_device(dev);
652         }
653 }
654
655 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
656 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
657 {                                                                              \
658         struct device *dev = data;                                             \
659         struct talitos_private *priv = dev_get_drvdata(dev);                   \
660         u32 isr, isr_lo;                                                       \
661         unsigned long flags;                                                   \
662                                                                                \
663         spin_lock_irqsave(&priv->reg_lock, flags);                             \
664         isr = in_be32(priv->reg + TALITOS_ISR);                                \
665         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
666         /* Acknowledge interrupt */                                            \
667         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
668         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
669                                                                                \
670         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
671                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
672                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
673         }                                                                      \
674         else {                                                                 \
675                 if (likely(isr & ch_done_mask)) {                              \
676                         /* mask further done interrupts. */                    \
677                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
678                         /* done_task will unmask done interrupts at exit */    \
679                         tasklet_schedule(&priv->done_task[tlet]);              \
680                 }                                                              \
681                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
682         }                                                                      \
683                                                                                \
684         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
685                                                                 IRQ_NONE;      \
686 }
687
688 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
689
690 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
691 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
692 {                                                                              \
693         struct device *dev = data;                                             \
694         struct talitos_private *priv = dev_get_drvdata(dev);                   \
695         u32 isr, isr_lo;                                                       \
696         unsigned long flags;                                                   \
697                                                                                \
698         spin_lock_irqsave(&priv->reg_lock, flags);                             \
699         isr = in_be32(priv->reg + TALITOS_ISR);                                \
700         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
701         /* Acknowledge interrupt */                                            \
702         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
703         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
704                                                                                \
705         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
706                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
707                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
708         }                                                                      \
709         else {                                                                 \
710                 if (likely(isr & ch_done_mask)) {                              \
711                         /* mask further done interrupts. */                    \
712                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
713                         /* done_task will unmask done interrupts at exit */    \
714                         tasklet_schedule(&priv->done_task[tlet]);              \
715                 }                                                              \
716                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
717         }                                                                      \
718                                                                                \
719         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
720                                                                 IRQ_NONE;      \
721 }
722
723 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
724 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
725                        0)
726 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
727                        1)
728
729 /*
730  * hwrng
731  */
732 static int talitos_rng_data_present(struct hwrng *rng, int wait)
733 {
734         struct device *dev = (struct device *)rng->priv;
735         struct talitos_private *priv = dev_get_drvdata(dev);
736         u32 ofl;
737         int i;
738
739         for (i = 0; i < 20; i++) {
740                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
741                       TALITOS_RNGUSR_LO_OFL;
742                 if (ofl || !wait)
743                         break;
744                 udelay(10);
745         }
746
747         return !!ofl;
748 }
749
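/*
 * Both 32-bit halves of the 64-bit RNG fifo entry are read so the entry is
 * consumed, but only the low word is handed back to the hwrng core, hence
 * the sizeof(u32) return value.
 */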
750 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
751 {
752         struct device *dev = (struct device *)rng->priv;
753         struct talitos_private *priv = dev_get_drvdata(dev);
754
755         /* rng fifo requires 64-bit accesses */
756         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
757         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
758
759         return sizeof(u32);
760 }
761
762 static int talitos_rng_init(struct hwrng *rng)
763 {
764         struct device *dev = (struct device *)rng->priv;
765         struct talitos_private *priv = dev_get_drvdata(dev);
766         unsigned int timeout = TALITOS_TIMEOUT;
767
768         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
769         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
770                  & TALITOS_RNGUSR_LO_RD)
771                && --timeout)
772                 cpu_relax();
773         if (timeout == 0) {
774                 dev_err(dev, "failed to reset rng hw\n");
775                 return -ENODEV;
776         }
777
778         /* start generating */
779         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
780
781         return 0;
782 }
783
784 static int talitos_register_rng(struct device *dev)
785 {
786         struct talitos_private *priv = dev_get_drvdata(dev);
787         int err;
788
789         priv->rng.name          = dev_driver_string(dev);
790         priv->rng.init          = talitos_rng_init;
791         priv->rng.data_present  = talitos_rng_data_present;
792         priv->rng.data_read     = talitos_rng_data_read;
793         priv->rng.priv          = (unsigned long)dev;
794
795         err = hwrng_register(&priv->rng);
796         if (!err)
797                 priv->rng_registered = true;
798
799         return err;
800 }
801
802 static void talitos_unregister_rng(struct device *dev)
803 {
804         struct talitos_private *priv = dev_get_drvdata(dev);
805
806         if (!priv->rng_registered)
807                 return;
808
809         hwrng_unregister(&priv->rng);
810         priv->rng_registered = false;
811 }
812
813 /*
814  * crypto alg
815  */
816 #define TALITOS_CRA_PRIORITY            3000
817 /*
818  * Defines a priority for doing AEAD with descriptors type
819  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
820  */
821 #define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
822 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
823 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
824
825 struct talitos_ctx {
826         struct device *dev;
827         int ch;
828         __be32 desc_hdr_template;
829         u8 key[TALITOS_MAX_KEY_SIZE];
830         u8 iv[TALITOS_MAX_IV_LENGTH];
831         dma_addr_t dma_key;
832         unsigned int keylen;
833         unsigned int enckeylen;
834         unsigned int authkeylen;
835         dma_addr_t dma_buf;
836         dma_addr_t dma_hw_context;
837 };
838
839 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
840 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
841
842 struct talitos_ahash_req_ctx {
843         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
844         unsigned int hw_context_size;
845         u8 buf[2][HASH_MAX_BLOCK_SIZE];
846         int buf_idx;
847         unsigned int swinit;
848         unsigned int first;
849         unsigned int last;
850         unsigned int to_hash_later;
851         unsigned int nbuf;
852         struct scatterlist bufsl[2];
853         struct scatterlist *psrc;
854 };
855
856 struct talitos_export_state {
857         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
858         u8 buf[HASH_MAX_BLOCK_SIZE];
859         unsigned int swinit;
860         unsigned int first;
861         unsigned int last;
862         unsigned int to_hash_later;
863         unsigned int nbuf;
864 };
865
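/*
 * For AEAD, ctx->key holds the authentication (MAC) key followed by the
 * cipher key; ctx->dma_key maps the whole buffer so descriptors can point at
 * either part (ipsec_esp() below uses dma_key and dma_key + authkeylen).
 */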
866 static int aead_setkey(struct crypto_aead *authenc,
867                        const u8 *key, unsigned int keylen)
868 {
869         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
870         struct device *dev = ctx->dev;
871         struct crypto_authenc_keys keys;
872
873         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
874                 goto badkey;
875
876         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
877                 goto badkey;
878
879         if (ctx->keylen)
880                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
881
882         memcpy(ctx->key, keys.authkey, keys.authkeylen);
883         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
884
885         ctx->keylen = keys.authkeylen + keys.enckeylen;
886         ctx->enckeylen = keys.enckeylen;
887         ctx->authkeylen = keys.authkeylen;
888         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
889                                       DMA_TO_DEVICE);
890
891         return 0;
892
893 badkey:
894         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
895         return -EINVAL;
896 }
897
898 /*
899  * talitos_edesc - s/w-extended descriptor
900  * @src_nents: number of segments in input scatterlist
901  * @dst_nents: number of segments in output scatterlist
902  * @icv_ool: whether ICV is out-of-line
903  * @iv_dma: dma address of iv for checking continuity and link table
904  * @dma_len: length of dma mapped link_tbl space
905  * @dma_link_tbl: bus physical address of link_tbl/buf
906  * @desc: h/w descriptor
907  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
909  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
909  *
910  * if decrypting (with authcheck), or either one of src_nents or dst_nents
911  * is greater than 1, an integrity check value is concatenated to the end
912  * of link_tbl data
913  */
914 struct talitos_edesc {
915         int src_nents;
916         int dst_nents;
917         bool icv_ool;
918         dma_addr_t iv_dma;
919         int dma_len;
920         dma_addr_t dma_link_tbl;
921         struct talitos_desc desc;
922         union {
923                 struct talitos_ptr link_tbl[0];
924                 u8 buf[0];
925         };
926 };
927
928 static void talitos_sg_unmap(struct device *dev,
929                              struct talitos_edesc *edesc,
930                              struct scatterlist *src,
931                              struct scatterlist *dst,
932                              unsigned int len, unsigned int offset)
933 {
934         struct talitos_private *priv = dev_get_drvdata(dev);
935         bool is_sec1 = has_ftr_sec1(priv);
936         unsigned int src_nents = edesc->src_nents ? : 1;
937         unsigned int dst_nents = edesc->dst_nents ? : 1;
938
939         if (is_sec1 && dst && dst_nents > 1) {
940                 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
941                                            len, DMA_FROM_DEVICE);
942                 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
943                                      offset);
944         }
945         if (src != dst) {
946                 if (src_nents == 1 || !is_sec1)
947                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
948
949                 if (dst && (dst_nents == 1 || !is_sec1))
950                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
951         } else if (src_nents == 1 || !is_sec1) {
952                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
953         }
954 }
955
956 static void ipsec_esp_unmap(struct device *dev,
957                             struct talitos_edesc *edesc,
958                             struct aead_request *areq)
959 {
960         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
961         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
962         unsigned int ivsize = crypto_aead_ivsize(aead);
963         bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
964         struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
965
966         if (is_ipsec_esp)
967                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
968                                          DMA_FROM_DEVICE);
969         unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
970
971         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
972                          areq->assoclen);
973
974         if (edesc->dma_len)
975                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
976                                  DMA_BIDIRECTIONAL);
977
978         if (!is_ipsec_esp) {
979                 unsigned int dst_nents = edesc->dst_nents ? : 1;
980
981                 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
982                                    areq->assoclen + areq->cryptlen - ivsize);
983         }
984 }
985
986 /*
987  * ipsec_esp descriptor callbacks
988  */
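/*
 * ipsec_esp_encrypt_done() copies a generated ICV back to the end of dst when
 * it was produced out of line; ipsec_esp_decrypt_swauth_done() compares the
 * received ICV against the one computed by the hardware using
 * crypto_memneq(); ipsec_esp_decrypt_hwauth_done() instead relies on the
 * hardware's ICCR status in the descriptor header.
 */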
989 static void ipsec_esp_encrypt_done(struct device *dev,
990                                    struct talitos_desc *desc, void *context,
991                                    int err)
992 {
993         struct talitos_private *priv = dev_get_drvdata(dev);
994         bool is_sec1 = has_ftr_sec1(priv);
995         struct aead_request *areq = context;
996         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
997         unsigned int authsize = crypto_aead_authsize(authenc);
998         unsigned int ivsize = crypto_aead_ivsize(authenc);
999         struct talitos_edesc *edesc;
1000         struct scatterlist *sg;
1001         void *icvdata;
1002
1003         edesc = container_of(desc, struct talitos_edesc, desc);
1004
1005         ipsec_esp_unmap(dev, edesc, areq);
1006
1007         /* copy the generated ICV to dst */
1008         if (edesc->icv_ool) {
1009                 if (is_sec1)
1010                         icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1011                 else
1012                         icvdata = &edesc->link_tbl[edesc->src_nents +
1013                                                    edesc->dst_nents + 2];
1014                 sg = sg_last(areq->dst, edesc->dst_nents);
1015                 memcpy((char *)sg_virt(sg) + sg->length - authsize,
1016                        icvdata, authsize);
1017         }
1018
1019         dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1020
1021         kfree(edesc);
1022
1023         aead_request_complete(areq, err);
1024 }
1025
1026 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1027                                           struct talitos_desc *desc,
1028                                           void *context, int err)
1029 {
1030         struct aead_request *req = context;
1031         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1032         unsigned int authsize = crypto_aead_authsize(authenc);
1033         struct talitos_edesc *edesc;
1034         struct scatterlist *sg;
1035         char *oicv, *icv;
1036         struct talitos_private *priv = dev_get_drvdata(dev);
1037         bool is_sec1 = has_ftr_sec1(priv);
1038
1039         edesc = container_of(desc, struct talitos_edesc, desc);
1040
1041         ipsec_esp_unmap(dev, edesc, req);
1042
1043         if (!err) {
1044                 /* auth check */
1045                 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1046                 icv = (char *)sg_virt(sg) + sg->length - authsize;
1047
1048                 if (edesc->dma_len) {
1049                         if (is_sec1)
1050                                 oicv = (char *)&edesc->dma_link_tbl +
1051                                                req->assoclen + req->cryptlen;
1052                         else
1053                                 oicv = (char *)
1054                                        &edesc->link_tbl[edesc->src_nents +
1055                                                         edesc->dst_nents + 2];
1056                         if (edesc->icv_ool)
1057                                 icv = oicv + authsize;
1058                 } else
1059                         oicv = (char *)&edesc->link_tbl[0];
1060
1061                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1062         }
1063
1064         kfree(edesc);
1065
1066         aead_request_complete(req, err);
1067 }
1068
1069 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1070                                           struct talitos_desc *desc,
1071                                           void *context, int err)
1072 {
1073         struct aead_request *req = context;
1074         struct talitos_edesc *edesc;
1075
1076         edesc = container_of(desc, struct talitos_edesc, desc);
1077
1078         ipsec_esp_unmap(dev, edesc, req);
1079
1080         /* check ICV auth status */
1081         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1082                      DESC_HDR_LO_ICCR1_PASS))
1083                 err = -EBADMSG;
1084
1085         kfree(edesc);
1086
1087         aead_request_complete(req, err);
1088 }
1089
1090 /*
1091  * convert scatterlist to SEC h/w link table format
1092  * stop at cryptlen bytes
1093  */
1094 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1095                                  unsigned int offset, int cryptlen,
1096                                  struct talitos_ptr *link_tbl_ptr)
1097 {
1098         int n_sg = sg_count;
1099         int count = 0;
1100
1101         while (cryptlen && sg && n_sg--) {
1102                 unsigned int len = sg_dma_len(sg);
1103
1104                 if (offset >= len) {
1105                         offset -= len;
1106                         goto next;
1107                 }
1108
1109                 len -= offset;
1110
1111                 if (len > cryptlen)
1112                         len = cryptlen;
1113
1114                 to_talitos_ptr(link_tbl_ptr + count,
1115                                sg_dma_address(sg) + offset, len, 0);
1116                 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1117                 count++;
1118                 cryptlen -= len;
1119                 offset = 0;
1120
1121 next:
1122                 sg = sg_next(sg);
1123         }
1124
1125         /* tag end of link table */
1126         if (count > 0)
1127                 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1128                                        DESC_PTR_LNKTBL_RETURN, 0);
1129
1130         return count;
1131 }
1132
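/*
 * talitos_sg_map() picks one of three strategies: a single dma segment is
 * pointed at directly; on SEC1 the data has already been linearized into the
 * bounce buffer, so the pointer targets dma_link_tbl + offset; otherwise a
 * link table is built with sg_to_link_tbl_offset() (last entry tagged
 * DESC_PTR_LNKTBL_RETURN) and the descriptor pointer is marked
 * DESC_PTR_LNKTBL_JUMP.
 */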
1133 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1134                    unsigned int len, struct talitos_edesc *edesc,
1135                    struct talitos_ptr *ptr,
1136                    int sg_count, unsigned int offset, int tbl_off)
1137 {
1138         struct talitos_private *priv = dev_get_drvdata(dev);
1139         bool is_sec1 = has_ftr_sec1(priv);
1140
1141         if (sg_count == 1) {
1142                 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1143                 return sg_count;
1144         }
1145         if (is_sec1) {
1146                 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1147                 return sg_count;
1148         }
1149         sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
1150                                          &edesc->link_tbl[tbl_off]);
1151         if (sg_count == 1) {
1152                 /* Only one segment now, so no link tbl needed */
1153                 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1154                 return sg_count;
1155         }
1156         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1157                             tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1158         to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1159
1160         return sg_count;
1161 }
1162
1163 /*
1164  * fill in and submit ipsec_esp descriptor
1165  */
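/*
 * Descriptor pointer usage, as filled in below: ptr[0] = HMAC key,
 * ptr[1] = associated data, ptr[2]/ptr[3] = cipher IV and cipher key
 * (order depends on descriptor type), ptr[4] = cipher in, ptr[5] = cipher
 * out, ptr[6] = ICV (HSNA type) or IV out (IPSEC_ESP type).
 */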
1166 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1167                      void (*callback)(struct device *dev,
1168                                       struct talitos_desc *desc,
1169                                       void *context, int error))
1170 {
1171         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1172         unsigned int authsize = crypto_aead_authsize(aead);
1173         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1174         struct device *dev = ctx->dev;
1175         struct talitos_desc *desc = &edesc->desc;
1176         unsigned int cryptlen = areq->cryptlen;
1177         unsigned int ivsize = crypto_aead_ivsize(aead);
1178         int tbl_off = 0;
1179         int sg_count, ret;
1180         int sg_link_tbl_len;
1181         bool sync_needed = false;
1182         struct talitos_private *priv = dev_get_drvdata(dev);
1183         bool is_sec1 = has_ftr_sec1(priv);
1184         bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1185         struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1186         struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1187
1188         /* hmac key */
1189         to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1190
1191         sg_count = edesc->src_nents ?: 1;
1192         if (is_sec1 && sg_count > 1)
1193                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1194                                   areq->assoclen + cryptlen);
1195         else
1196                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1197                                       (areq->src == areq->dst) ?
1198                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1199
1200         /* hmac data */
1201         ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1202                              &desc->ptr[1], sg_count, 0, tbl_off);
1203
1204         if (ret > 1) {
1205                 tbl_off += ret;
1206                 sync_needed = true;
1207         }
1208
1209         /* cipher iv */
1210         to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1211
1212         /* cipher key */
1213         to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1214                        ctx->enckeylen, is_sec1);
1215
1216         /*
1217          * cipher in
1218          * map and adjust cipher len to aead request cryptlen.
1219          * extent is bytes of HMAC postpended to ciphertext,
1220          * typically 12 for ipsec
1221          */
1222         sg_link_tbl_len = cryptlen;
1223
1224         if (is_ipsec_esp) {
1225                 to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
1226
1227                 if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
1228                         sg_link_tbl_len += authsize;
1229         }
1230
1231         ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
1232                              &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
1233
1234         if (ret > 1) {
1235                 tbl_off += ret;
1236                 sync_needed = true;
1237         }
1238
1239         /* cipher out */
1240         if (areq->src != areq->dst) {
1241                 sg_count = edesc->dst_nents ? : 1;
1242                 if (!is_sec1 || sg_count == 1)
1243                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1244         }
1245
1246         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1247                              sg_count, areq->assoclen, tbl_off);
1248
1249         if (is_ipsec_esp)
1250                 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1251
1252         /* ICV data */
1253         if (ret > 1) {
1254                 tbl_off += ret;
1255                 edesc->icv_ool = true;
1256                 sync_needed = true;
1257
1258                 if (is_ipsec_esp) {
1259                         struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1260                         int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1261                                      sizeof(struct talitos_ptr) + authsize;
1262
1263                         /* Add an entry to the link table for ICV data */
1264                         to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1265                         to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1266                                                is_sec1);
1267
1268                         /* icv data follows link tables */
1269                         to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1270                                        authsize, is_sec1);
1271                 } else {
1272                         dma_addr_t addr = edesc->dma_link_tbl;
1273
1274                         if (is_sec1)
1275                                 addr += areq->assoclen + cryptlen;
1276                         else
1277                                 addr += sizeof(struct talitos_ptr) * tbl_off;
1278
1279                         to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1280                 }
1281         } else if (!is_ipsec_esp) {
1282                 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1283                                      &desc->ptr[6], sg_count, areq->assoclen +
1284                                                               cryptlen,
1285                                      tbl_off);
1286                 if (ret > 1) {
1287                         tbl_off += ret;
1288                         edesc->icv_ool = true;
1289                         sync_needed = true;
1290                 } else {
1291                         edesc->icv_ool = false;
1292                 }
1293         } else {
1294                 edesc->icv_ool = false;
1295         }
1296
1297         /* iv out */
1298         if (is_ipsec_esp)
1299                 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1300                                        DMA_FROM_DEVICE);
1301
1302         if (sync_needed)
1303                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1304                                            edesc->dma_len,
1305                                            DMA_BIDIRECTIONAL);
1306
1307         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1308         if (ret != -EINPROGRESS) {
1309                 ipsec_esp_unmap(dev, edesc, areq);
1310                 kfree(edesc);
1311         }
1312         return ret;
1313 }
1314
1315 /*
1316  * allocate and map the extended descriptor
1317  */
1318 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1319                                                  struct scatterlist *src,
1320                                                  struct scatterlist *dst,
1321                                                  u8 *iv,
1322                                                  unsigned int assoclen,
1323                                                  unsigned int cryptlen,
1324                                                  unsigned int authsize,
1325                                                  unsigned int ivsize,
1326                                                  int icv_stashing,
1327                                                  u32 cryptoflags,
1328                                                  bool encrypt)
1329 {
1330         struct talitos_edesc *edesc;
1331         int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1332         dma_addr_t iv_dma = 0;
1333         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1334                       GFP_ATOMIC;
1335         struct talitos_private *priv = dev_get_drvdata(dev);
1336         bool is_sec1 = has_ftr_sec1(priv);
1337         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1338         void *err;
1339
1340         if (cryptlen + authsize > max_len) {
1341                 dev_err(dev, "length exceeds h/w max limit\n");
1342                 return ERR_PTR(-EINVAL);
1343         }
1344
1345         if (ivsize)
1346                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1347
1348         if (!dst || dst == src) {
1349                 src_len = assoclen + cryptlen + authsize;
1350                 src_nents = sg_nents_for_len(src, src_len);
1351                 if (src_nents < 0) {
1352                         dev_err(dev, "Invalid number of src SG.\n");
1353                         err = ERR_PTR(-EINVAL);
1354                         goto error_sg;
1355                 }
1356                 src_nents = (src_nents == 1) ? 0 : src_nents;
1357                 dst_nents = dst ? src_nents : 0;
1358                 dst_len = 0;
1359         } else { /* dst && dst != src */
1360                 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1361                 src_nents = sg_nents_for_len(src, src_len);
1362                 if (src_nents < 0) {
1363                         dev_err(dev, "Invalid number of src SG.\n");
1364                         err = ERR_PTR(-EINVAL);
1365                         goto error_sg;
1366                 }
1367                 src_nents = (src_nents == 1) ? 0 : src_nents;
1368                 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1369                 dst_nents = sg_nents_for_len(dst, dst_len);
1370                 if (dst_nents < 0) {
1371                         dev_err(dev, "Invalid number of dst SG.\n");
1372                         err = ERR_PTR(-EINVAL);
1373                         goto error_sg;
1374                 }
1375                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1376         }
1377
1378         /*
1379          * allocate space for base edesc plus the link tables,
1380          * allowing for two separate entries for AD and generated ICV (+ 2),
1381          * and space for two sets of ICVs (stashed and generated)
1382          */
1383         alloc_len = sizeof(struct talitos_edesc);
1384         if (src_nents || dst_nents) {
1385                 if (is_sec1)
1386                         dma_len = (src_nents ? src_len : 0) +
1387                                   (dst_nents ? dst_len : 0);
1388                 else
1389                         dma_len = (src_nents + dst_nents + 2) *
1390                                   sizeof(struct talitos_ptr) + authsize * 2;
1391                 alloc_len += dma_len;
1392         } else {
1393                 dma_len = 0;
1394                 alloc_len += icv_stashing ? authsize : 0;
1395         }
1396
1397         /* if it's an ahash, add space for a second desc next to the first one */
1398         if (is_sec1 && !dst)
1399                 alloc_len += sizeof(struct talitos_desc);
1400
1401         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1402         if (!edesc) {
1403                 dev_err(dev, "could not allocate edescriptor\n");
1404                 err = ERR_PTR(-ENOMEM);
1405                 goto error_sg;
1406         }
1407         memset(&edesc->desc, 0, sizeof(edesc->desc));
1408
1409         edesc->src_nents = src_nents;
1410         edesc->dst_nents = dst_nents;
1411         edesc->iv_dma = iv_dma;
1412         edesc->dma_len = dma_len;
1413         if (dma_len) {
1414                 void *addr = &edesc->link_tbl[0];
1415
1416                 if (is_sec1 && !dst)
1417                         addr += sizeof(struct talitos_desc);
1418                 edesc->dma_link_tbl = dma_map_single(dev, addr,
1419                                                      edesc->dma_len,
1420                                                      DMA_BIDIRECTIONAL);
1421         }
1422         return edesc;
1423 error_sg:
1424         if (iv_dma)
1425                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1426         return err;
1427 }
1428
1429 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1430                                               int icv_stashing, bool encrypt)
1431 {
1432         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1433         unsigned int authsize = crypto_aead_authsize(authenc);
1434         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1435         unsigned int ivsize = crypto_aead_ivsize(authenc);
1436
1437         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1438                                    iv, areq->assoclen, areq->cryptlen,
1439                                    authsize, ivsize, icv_stashing,
1440                                    areq->base.flags, encrypt);
1441 }
1442
1443 static int aead_encrypt(struct aead_request *req)
1444 {
1445         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1446         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1447         struct talitos_edesc *edesc;
1448
1449         /* allocate extended descriptor */
1450         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1451         if (IS_ERR(edesc))
1452                 return PTR_ERR(edesc);
1453
1454         /* set encrypt */
1455         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1456
1457         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1458 }
1459
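/*
 * Decrypt side of the AEAD path: req->cryptlen includes the ICV, so strip
 * authsize bytes before building the descriptor.  When the hardware can
 * check the ICV for this request (TALITOS_FTR_HW_AUTH_CHECK, and either no
 * link table is needed or TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT is
 * set), the MDEU is asked to verify it (DESC_HDR_MODE1_MDEU_CICV).
 * Otherwise the incoming ICV is stashed in the edesc so the completion
 * callback can compare it with the ICV generated by the hardware.
 */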
1460 static int aead_decrypt(struct aead_request *req)
1461 {
1462         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1463         unsigned int authsize = crypto_aead_authsize(authenc);
1464         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1465         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1466         struct talitos_edesc *edesc;
1467         struct scatterlist *sg;
1468         void *icvdata;
1469
1470         req->cryptlen -= authsize;
1471
1472         /* allocate extended descriptor */
1473         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1474         if (IS_ERR(edesc))
1475                 return PTR_ERR(edesc);
1476
1477         if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1478             ((!edesc->src_nents && !edesc->dst_nents) ||
1479              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1480
1481                 /* decrypt and check the ICV */
1482                 edesc->desc.hdr = ctx->desc_hdr_template |
1483                                   DESC_HDR_DIR_INBOUND |
1484                                   DESC_HDR_MODE1_MDEU_CICV;
1485
1486                 /* reset integrity check result bits */
1487
1488                 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1489         }
1490
1491         /* Have to check the ICV with software */
1492         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1493
1494         /* stash incoming ICV for later cmp with ICV generated by the h/w */
1495         if (edesc->dma_len)
1496                 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1497                                                    edesc->dst_nents + 2];
1498         else
1499                 icvdata = &edesc->link_tbl[0];
1500
1501         sg = sg_last(req->src, edesc->src_nents ? : 1);
1502
1503         memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1504
1505         return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1506 }
1507
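/*
 * Copy the cipher key into the context and DMA-map it for use in the
 * descriptors.  Oversized keys are rejected, weak DES keys are refused when
 * the tfm carries CRYPTO_TFM_REQ_WEAK_KEY, and any previously mapped key is
 * unmapped first.
 */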
1508 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1509                              const u8 *key, unsigned int keylen)
1510 {
1511         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1512         struct device *dev = ctx->dev;
1513         u32 tmp[DES_EXPKEY_WORDS];
1514
1515         if (keylen > TALITOS_MAX_KEY_SIZE) {
1516                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1517                 return -EINVAL;
1518         }
1519
1520         if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1521                      CRYPTO_TFM_REQ_WEAK_KEY) &&
1522             !des_ekey(tmp, key)) {
1523                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1524                 return -EINVAL;
1525         }
1526
1527         if (ctx->keylen)
1528                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1529
1530         memcpy(&ctx->key, key, keylen);
1531         ctx->keylen = keylen;
1532
1533         ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1534
1535         return 0;
1536 }
1537
1538 static void common_nonsnoop_unmap(struct device *dev,
1539                                   struct talitos_edesc *edesc,
1540                                   struct ablkcipher_request *areq)
1541 {
1542         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1543
1544         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1545         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1546
1547         if (edesc->dma_len)
1548                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1549                                  DMA_BIDIRECTIONAL);
1550 }
1551
1552 static void ablkcipher_done(struct device *dev,
1553                             struct talitos_desc *desc, void *context,
1554                             int err)
1555 {
1556         struct ablkcipher_request *areq = context;
1557         struct talitos_edesc *edesc;
1558
1559         edesc = container_of(desc, struct talitos_edesc, desc);
1560
1561         common_nonsnoop_unmap(dev, edesc, areq);
1562
1563         kfree(edesc);
1564
1565         areq->base.complete(&areq->base, err);
1566 }
1567
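/*
 * Build and submit the descriptor for a single ablkcipher request:
 * ptr[1] = IV in, ptr[2] = cipher key, ptr[3] = data in, ptr[4] = data out,
 * ptr[5] = IV out; DWORDs 0 and 6 are unused.  On SEC1, a multi-entry source
 * scatterlist is linearized into edesc->buf instead of being DMA-mapped.
 */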
1568 static int common_nonsnoop(struct talitos_edesc *edesc,
1569                            struct ablkcipher_request *areq,
1570                            void (*callback) (struct device *dev,
1571                                              struct talitos_desc *desc,
1572                                              void *context, int error))
1573 {
1574         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1575         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1576         struct device *dev = ctx->dev;
1577         struct talitos_desc *desc = &edesc->desc;
1578         unsigned int cryptlen = areq->nbytes;
1579         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1580         int sg_count, ret;
1581         bool sync_needed = false;
1582         struct talitos_private *priv = dev_get_drvdata(dev);
1583         bool is_sec1 = has_ftr_sec1(priv);
1584
1585         /* first DWORD empty */
1586
1587         /* cipher iv */
1588         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1589
1590         /* cipher key */
1591         to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1592
1593         sg_count = edesc->src_nents ?: 1;
1594         if (is_sec1 && sg_count > 1)
1595                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1596                                   cryptlen);
1597         else
1598                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1599                                       (areq->src == areq->dst) ?
1600                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1601         /*
1602          * cipher in
1603          */
1604         sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1605                                   &desc->ptr[3], sg_count, 0, 0);
1606         if (sg_count > 1)
1607                 sync_needed = true;
1608
1609         /* cipher out */
1610         if (areq->src != areq->dst) {
1611                 sg_count = edesc->dst_nents ? : 1;
1612                 if (!is_sec1 || sg_count == 1)
1613                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1614         }
1615
1616         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1617                              sg_count, 0, (edesc->src_nents + 1));
1618         if (ret > 1)
1619                 sync_needed = true;
1620
1621         /* iv out */
1622         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1623                                DMA_FROM_DEVICE);
1624
1625         /* last DWORD empty */
1626
1627         if (sync_needed)
1628                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1629                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1630
1631         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1632         if (ret != -EINPROGRESS) {
1633                 common_nonsnoop_unmap(dev, edesc, areq);
1634                 kfree(edesc);
1635         }
1636         return ret;
1637 }
1638
1639 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1640                                                     areq, bool encrypt)
1641 {
1642         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1643         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1644         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1645
1646         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1647                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1648                                    areq->base.flags, encrypt);
1649 }
1650
1651 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1652 {
1653         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1654         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1655         struct talitos_edesc *edesc;
1656
1657         /* allocate extended descriptor */
1658         edesc = ablkcipher_edesc_alloc(areq, true);
1659         if (IS_ERR(edesc))
1660                 return PTR_ERR(edesc);
1661
1662         /* set encrypt */
1663         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1664
1665         return common_nonsnoop(edesc, areq, ablkcipher_done);
1666 }
1667
1668 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1669 {
1670         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1671         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1672         struct talitos_edesc *edesc;
1673
1674         /* allocate extended descriptor */
1675         edesc = ablkcipher_edesc_alloc(areq, false);
1676         if (IS_ERR(edesc))
1677                 return PTR_ERR(edesc);
1678
1679         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1680
1681         return common_nonsnoop(edesc, areq, ablkcipher_done);
1682 }
1683
1684 static void common_nonsnoop_hash_unmap(struct device *dev,
1685                                        struct talitos_edesc *edesc,
1686                                        struct ahash_request *areq)
1687 {
1688         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1689
1690         talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1691
1692         if (edesc->dma_len)
1693                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1694                                  DMA_BIDIRECTIONAL);
1695
1696         if (edesc->desc.next_desc)
1697                 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1698                                  TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1699 }
1700
1701 static void ahash_done(struct device *dev,
1702                        struct talitos_desc *desc, void *context,
1703                        int err)
1704 {
1705         struct ahash_request *areq = context;
1706         struct talitos_edesc *edesc =
1707                  container_of(desc, struct talitos_edesc, desc);
1708         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1709
1710         if (!req_ctx->last && req_ctx->to_hash_later) {
1711                 /* Position any partial block for next update/final/finup */
1712                 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1713                 req_ctx->nbuf = req_ctx->to_hash_later;
1714         }
1715         common_nonsnoop_hash_unmap(dev, edesc, areq);
1716
1717         kfree(edesc);
1718
1719         areq->base.complete(&areq->base, err);
1720 }
1721
1722 /*
1723  * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1724  * ourselves and submit a padded block
1725  */
1726 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1727                                struct talitos_edesc *edesc,
1728                                struct talitos_ptr *ptr)
1729 {
1730         static u8 padded_hash[64] = {
1731                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1732                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1733                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1734                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1735         };
1736
1737         pr_err_once("Bug in SEC1, padding ourself\n");
1738         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1739         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1740                                (char *)padded_hash, DMA_TO_DEVICE);
1741 }
1742
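/*
 * Build and submit the descriptor for one hashing pass: ptr[1] = hash
 * context in (skipped when the hardware initializes the context itself),
 * ptr[2] = HMAC key if any, ptr[3] = data to hash, ptr[5] = final digest on
 * the last request, otherwise the updated context.  On SEC1, previously
 * buffered data is hashed through ptr[3] and any remaining scatterlist data
 * is handled by a second descriptor chained via next_desc.
 */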
1743 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1744                                 struct ahash_request *areq, unsigned int length,
1745                                 unsigned int offset,
1746                                 void (*callback) (struct device *dev,
1747                                                   struct talitos_desc *desc,
1748                                                   void *context, int error))
1749 {
1750         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1751         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1752         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1753         struct device *dev = ctx->dev;
1754         struct talitos_desc *desc = &edesc->desc;
1755         int ret;
1756         bool sync_needed = false;
1757         struct talitos_private *priv = dev_get_drvdata(dev);
1758         bool is_sec1 = has_ftr_sec1(priv);
1759         int sg_count;
1760
1761         /* first DWORD empty */
1762
1763         /* hash context in */
1764         if (!req_ctx->first || req_ctx->swinit) {
1765                 to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
1766                                req_ctx->hw_context_size, is_sec1);
1767                 req_ctx->swinit = 0;
1768         }
1769         /* Indicate next op is not the first. */
1770         req_ctx->first = 0;
1771
1772         /* HMAC key */
1773         if (ctx->keylen)
1774                 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1775                                is_sec1);
1776
1777         if (is_sec1 && req_ctx->nbuf)
1778                 length -= req_ctx->nbuf;
1779
1780         sg_count = edesc->src_nents ?: 1;
1781         if (is_sec1 && sg_count > 1)
1782                 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1783                                    edesc->buf + sizeof(struct talitos_desc),
1784                                    length, req_ctx->nbuf);
1785         else if (length)
1786                 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1787                                       DMA_TO_DEVICE);
1788         /*
1789          * data in
1790          */
1791         if (is_sec1 && req_ctx->nbuf) {
1792                 dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
1793                                                     HASH_MAX_BLOCK_SIZE;
1794
1795                 to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
1796         } else {
1797                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1798                                           &desc->ptr[3], sg_count, offset, 0);
1799                 if (sg_count > 1)
1800                         sync_needed = true;
1801         }
1802
1803         /* fifth DWORD empty */
1804
1805         /* hash/HMAC out -or- hash context out */
1806         if (req_ctx->last)
1807                 map_single_talitos_ptr(dev, &desc->ptr[5],
1808                                        crypto_ahash_digestsize(tfm),
1809                                        areq->result, DMA_FROM_DEVICE);
1810         else
1811                 to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
1812                                req_ctx->hw_context_size, is_sec1);
1813
1814         /* last DWORD empty */
1815
1816         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1817                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1818
1819         if (is_sec1 && req_ctx->nbuf && length) {
1820                 struct talitos_desc *desc2 = desc + 1;
1821                 dma_addr_t next_desc;
1822
1823                 memset(desc2, 0, sizeof(*desc2));
1824                 desc2->hdr = desc->hdr;
1825                 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1826                 desc2->hdr1 = desc2->hdr;
1827                 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1828                 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1829                 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1830
1831                 to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
1832                                req_ctx->hw_context_size, is_sec1);
1833
1834                 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1835                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1836                                           &desc2->ptr[3], sg_count, offset, 0);
1837                 if (sg_count > 1)
1838                         sync_needed = true;
1839                 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1840                 if (req_ctx->last)
1841                         to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
1842                                        req_ctx->hw_context_size, is_sec1);
1843
1844                 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1845                                            DMA_BIDIRECTIONAL);
1846                 desc->next_desc = cpu_to_be32(next_desc);
1847         }
1848
1849         if (sync_needed)
1850                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1851                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1852
1853         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1854         if (ret != -EINPROGRESS) {
1855                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1856                 kfree(edesc);
1857         }
1858         return ret;
1859 }
1860
1861 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1862                                                unsigned int nbytes)
1863 {
1864         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1865         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1866         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1867         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1868         bool is_sec1 = has_ftr_sec1(priv);
1869
1870         if (is_sec1)
1871                 nbytes -= req_ctx->nbuf;
1872
1873         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1874                                    nbytes, 0, 0, 0, areq->base.flags, false);
1875 }
1876
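/*
 * Reset the per-request hash state and (re)map the hardware context for DMA.
 * On SEC1 the request's block buffer is mapped as well, since partial blocks
 * are staged there between calls.
 */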
1877 static int ahash_init(struct ahash_request *areq)
1878 {
1879         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1880         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1881         struct device *dev = ctx->dev;
1882         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1883         unsigned int size;
1884         struct talitos_private *priv = dev_get_drvdata(dev);
1885         bool is_sec1 = has_ftr_sec1(priv);
1886
1887         /* Initialize the context */
1888         req_ctx->buf_idx = 0;
1889         req_ctx->nbuf = 0;
1890         req_ctx->first = 1; /* first indicates h/w must init its context */
1891         req_ctx->swinit = 0; /* assume h/w init of context */
1892         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1893                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1894                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1895         req_ctx->hw_context_size = size;
1896
1897         if (ctx->dma_hw_context)
1898                 dma_unmap_single(dev, ctx->dma_hw_context, size,
1899                                  DMA_BIDIRECTIONAL);
1900         ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
1901                                              DMA_BIDIRECTIONAL);
1902         if (ctx->dma_buf)
1903                 dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
1904                                  DMA_TO_DEVICE);
1905         if (is_sec1)
1906                 ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
1907                                               sizeof(req_ctx->buf),
1908                                               DMA_TO_DEVICE);
1909         return 0;
1910 }
1911
1912 /*
1913  * on h/w without explicit sha224 support, we initialize h/w context
1914  * manually with sha224 constants, and tell it to run sha256.
1915  */
1916 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1917 {
1918         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1919         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1920         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1921         struct device *dev = ctx->dev;
1922
1923         ahash_init(areq);
1924         req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1925
1926         req_ctx->hw_context[0] = SHA224_H0;
1927         req_ctx->hw_context[1] = SHA224_H1;
1928         req_ctx->hw_context[2] = SHA224_H2;
1929         req_ctx->hw_context[3] = SHA224_H3;
1930         req_ctx->hw_context[4] = SHA224_H4;
1931         req_ctx->hw_context[5] = SHA224_H5;
1932         req_ctx->hw_context[6] = SHA224_H6;
1933         req_ctx->hw_context[7] = SHA224_H7;
1934
1935         /* init 64-bit count */
1936         req_ctx->hw_context[8] = 0;
1937         req_ctx->hw_context[9] = 0;
1938
1939         dma_sync_single_for_device(dev, ctx->dma_hw_context,
1940                                    req_ctx->hw_context_size, DMA_TO_DEVICE);
1941
1942         return 0;
1943 }
1944
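/*
 * Core update/final logic.  Updates that still fit within one block are only
 * accumulated in the request buffer.  Otherwise, whole blocks are hashed now
 * and the remainder (or, on a block-aligned non-final update, one full
 * block) is copied into the other half of the double buffer for the next
 * update/final/finup.
 */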
1945 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1946 {
1947         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1948         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1949         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1950         struct talitos_edesc *edesc;
1951         unsigned int blocksize =
1952                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1953         unsigned int nbytes_to_hash;
1954         unsigned int to_hash_later;
1955         unsigned int nsg;
1956         int nents;
1957         struct device *dev = ctx->dev;
1958         struct talitos_private *priv = dev_get_drvdata(dev);
1959         bool is_sec1 = has_ftr_sec1(priv);
1960         int offset = 0;
1961         u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1962
1963         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1964                 /* Buffer up to one whole block */
1965                 nents = sg_nents_for_len(areq->src, nbytes);
1966                 if (nents < 0) {
1967                         dev_err(ctx->dev, "Invalid number of src SG.\n");
1968                         return nents;
1969                 }
1970                 sg_copy_to_buffer(areq->src, nents,
1971                                   ctx_buf + req_ctx->nbuf, nbytes);
1972                 req_ctx->nbuf += nbytes;
1973                 return 0;
1974         }
1975
1976         /* At least (blocksize + 1) bytes to hash, unless req_ctx->last */
1977         nbytes_to_hash = nbytes + req_ctx->nbuf;
1978         to_hash_later = nbytes_to_hash & (blocksize - 1);
1979
1980         if (req_ctx->last)
1981                 to_hash_later = 0;
1982         else if (to_hash_later)
1983                 /* There is a partial block. Hash the full block(s) now */
1984                 nbytes_to_hash -= to_hash_later;
1985         else {
1986                 /* Keep one block buffered */
1987                 nbytes_to_hash -= blocksize;
1988                 to_hash_later = blocksize;
1989         }
1990
1991         /* Chain in any previously buffered data */
1992         if (!is_sec1 && req_ctx->nbuf) {
1993                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1994                 sg_init_table(req_ctx->bufsl, nsg);
1995                 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
1996                 if (nsg > 1)
1997                         sg_chain(req_ctx->bufsl, 2, areq->src);
1998                 req_ctx->psrc = req_ctx->bufsl;
1999         } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2000                 if (nbytes_to_hash > blocksize)
2001                         offset = blocksize - req_ctx->nbuf;
2002                 else
2003                         offset = nbytes_to_hash - req_ctx->nbuf;
2004                 nents = sg_nents_for_len(areq->src, offset);
2005                 if (nents < 0) {
2006                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2007                         return nents;
2008                 }
2009                 sg_copy_to_buffer(areq->src, nents,
2010                                   ctx_buf + req_ctx->nbuf, offset);
2011                 req_ctx->nbuf += offset;
2012                 req_ctx->psrc = areq->src;
2013         } else
2014                 req_ctx->psrc = areq->src;
2015
2016         if (to_hash_later) {
2017                 nents = sg_nents_for_len(areq->src, nbytes);
2018                 if (nents < 0) {
2019                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2020                         return nents;
2021                 }
2022                 sg_pcopy_to_buffer(areq->src, nents,
2023                                    req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2024                                    to_hash_later,
2025                                    nbytes - to_hash_later);
2026         }
2027         req_ctx->to_hash_later = to_hash_later;
2028
2029         /* Allocate extended descriptor */
2030         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2031         if (IS_ERR(edesc))
2032                 return PTR_ERR(edesc);
2033
2034         edesc->desc.hdr = ctx->desc_hdr_template;
2035
2036         /* On last one, request SEC to pad; otherwise continue */
2037         if (req_ctx->last)
2038                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2039         else
2040                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2041
2042         /* request SEC to INIT hash. */
2043         if (req_ctx->first && !req_ctx->swinit)
2044                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2045         if (is_sec1) {
2046                 dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
2047                                                     HASH_MAX_BLOCK_SIZE;
2048
2049                 dma_sync_single_for_device(dev, dma_buf,
2050                                            req_ctx->nbuf, DMA_TO_DEVICE);
2051         }
2052
2053         /* When the tfm context has a keylen, it's an HMAC.
2054          * A first or last (i.e. not middle) descriptor must request HMAC.
2055          */
2056         if (ctx->keylen && (req_ctx->first || req_ctx->last))
2057                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2058
2059         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2060                                     ahash_done);
2061 }
2062
2063 static int ahash_update(struct ahash_request *areq)
2064 {
2065         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2066
2067         req_ctx->last = 0;
2068
2069         return ahash_process_req(areq, areq->nbytes);
2070 }
2071
2072 static int ahash_final(struct ahash_request *areq)
2073 {
2074         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2075
2076         req_ctx->last = 1;
2077
2078         return ahash_process_req(areq, 0);
2079 }
2080
2081 static int ahash_finup(struct ahash_request *areq)
2082 {
2083         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2084
2085         req_ctx->last = 1;
2086
2087         return ahash_process_req(areq, areq->nbytes);
2088 }
2089
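/* digest: initialize the state, then hash the whole request as a final op */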
2090 static int ahash_digest(struct ahash_request *areq)
2091 {
2092         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2093         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2094
2095         ahash->init(areq);
2096         req_ctx->last = 1;
2097
2098         return ahash_process_req(areq, areq->nbytes);
2099 }
2100
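/*
 * export/import save and restore an in-progress hash: the hardware context
 * (synced back from the device first), the partially filled block buffer and
 * the request-state flags, so hashing can resume on a fresh request.
 */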
2101 static int ahash_export(struct ahash_request *areq, void *out)
2102 {
2103         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2104         struct talitos_export_state *export = out;
2105         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2106         struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
2107         struct device *dev = ctx->dev;
2108
2109         dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
2110                                 req_ctx->hw_context_size, DMA_FROM_DEVICE);
2111         memcpy(export->hw_context, req_ctx->hw_context,
2112                req_ctx->hw_context_size);
2113         memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2114         export->swinit = req_ctx->swinit;
2115         export->first = req_ctx->first;
2116         export->last = req_ctx->last;
2117         export->to_hash_later = req_ctx->to_hash_later;
2118         export->nbuf = req_ctx->nbuf;
2119
2120         return 0;
2121 }
2122
2123 static int ahash_import(struct ahash_request *areq, const void *in)
2124 {
2125         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2126         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2127         const struct talitos_export_state *export = in;
2128         unsigned int size;
2129         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2130         struct device *dev = ctx->dev;
2131         struct talitos_private *priv = dev_get_drvdata(dev);
2132         bool is_sec1 = has_ftr_sec1(priv);
2133
2134         memset(req_ctx, 0, sizeof(*req_ctx));
2135         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2136                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2137                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2138         req_ctx->hw_context_size = size;
2139         if (ctx->dma_hw_context)
2140                 dma_unmap_single(dev, ctx->dma_hw_context, size,
2141                                  DMA_BIDIRECTIONAL);
2142
2143         memcpy(req_ctx->hw_context, export->hw_context, size);
2144         ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
2145                                              DMA_BIDIRECTIONAL);
2146         if (ctx->dma_buf)
2147                 dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
2148                                  DMA_TO_DEVICE);
2149         memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2150         if (is_sec1)
2151                 ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
2152                                               sizeof(req_ctx->buf),
2153                                               DMA_TO_DEVICE);
2154         req_ctx->swinit = export->swinit;
2155         req_ctx->first = export->first;
2156         req_ctx->last = export->last;
2157         req_ctx->to_hash_later = export->to_hash_later;
2158         req_ctx->nbuf = export->nbuf;
2159
2160         return 0;
2161 }
2162
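/*
 * Hash an over-long HMAC key down to a digest with this same tfm, waiting
 * synchronously for completion.  ctx->keylen is zeroed first so the digest
 * is computed unkeyed; the caller sets the real key length afterwards.
 */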
2163 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2164                    u8 *hash)
2165 {
2166         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2167
2168         struct scatterlist sg[1];
2169         struct ahash_request *req;
2170         struct crypto_wait wait;
2171         int ret;
2172
2173         crypto_init_wait(&wait);
2174
2175         req = ahash_request_alloc(tfm, GFP_KERNEL);
2176         if (!req)
2177                 return -ENOMEM;
2178
2179         /* Keep tfm keylen == 0 during hash of the long key */
2180         ctx->keylen = 0;
2181         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2182                                    crypto_req_done, &wait);
2183
2184         sg_init_one(&sg[0], key, keylen);
2185
2186         ahash_request_set_crypt(req, sg, hash, keylen);
2187         ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2188
2189         ahash_request_free(req);
2190
2191         return ret;
2192 }
2193
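/*
 * Standard HMAC key handling: a key longer than the block size is first
 * hashed down to digestsize via keyhash(), then the (possibly shortened)
 * key is copied into the context and DMA-mapped.
 */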
2194 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2195                         unsigned int keylen)
2196 {
2197         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2198         struct device *dev = ctx->dev;
2199         unsigned int blocksize =
2200                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2201         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2202         unsigned int keysize = keylen;
2203         u8 hash[SHA512_DIGEST_SIZE];
2204         int ret;
2205
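        /*
         * A key that is copied verbatim below must fit in ctx->key
         * (TALITOS_MAX_KEY_SIZE bytes); reject anything larger, mirroring
         * the check in ablkcipher_setkey().
         */
        if (keylen <= blocksize && keylen > TALITOS_MAX_KEY_SIZE) {
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
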
2206         if (keylen <= blocksize)
2207                 memcpy(ctx->key, key, keysize);
2208         else {
2209                 /* Must get the hash of the long key */
2210                 ret = keyhash(tfm, key, keylen, hash);
2211
2212                 if (ret) {
2213                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2214                         return -EINVAL;
2215                 }
2216
2217                 keysize = digestsize;
2218                 memcpy(ctx->key, hash, digestsize);
2219         }
2220
2221         if (ctx->keylen)
2222                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2223
2224         ctx->keylen = keysize;
2225         ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2226
2227         return 0;
2228 }
2229
2231 struct talitos_alg_template {
2232         u32 type;
2233         u32 priority;
2234         union {
2235                 struct crypto_alg crypto;
2236                 struct ahash_alg hash;
2237                 struct aead_alg aead;
2238         } alg;
2239         __be32 desc_hdr_template;
2240 };
2241
2242 static struct talitos_alg_template driver_algs[] = {
2243         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
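        /*
         * Each mode below also has a lower-priority variant
         * (TALITOS_CRA_PRIORITY_AEAD_HSNA) that uses the
         * DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU descriptor type instead.
         */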
2244         {       .type = CRYPTO_ALG_TYPE_AEAD,
2245                 .alg.aead = {
2246                         .base = {
2247                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2248                                 .cra_driver_name = "authenc-hmac-sha1-"
2249                                                    "cbc-aes-talitos",
2250                                 .cra_blocksize = AES_BLOCK_SIZE,
2251                                 .cra_flags = CRYPTO_ALG_ASYNC,
2252                         },
2253                         .ivsize = AES_BLOCK_SIZE,
2254                         .maxauthsize = SHA1_DIGEST_SIZE,
2255                 },
2256                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2257                                      DESC_HDR_SEL0_AESU |
2258                                      DESC_HDR_MODE0_AESU_CBC |
2259                                      DESC_HDR_SEL1_MDEUA |
2260                                      DESC_HDR_MODE1_MDEU_INIT |
2261                                      DESC_HDR_MODE1_MDEU_PAD |
2262                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2263         },
2264         {       .type = CRYPTO_ALG_TYPE_AEAD,
2265                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2266                 .alg.aead = {
2267                         .base = {
2268                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2269                                 .cra_driver_name = "authenc-hmac-sha1-"
2270                                                    "cbc-aes-talitos",
2271                                 .cra_blocksize = AES_BLOCK_SIZE,
2272                                 .cra_flags = CRYPTO_ALG_ASYNC,
2273                         },
2274                         .ivsize = AES_BLOCK_SIZE,
2275                         .maxauthsize = SHA1_DIGEST_SIZE,
2276                 },
2277                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2278                                      DESC_HDR_SEL0_AESU |
2279                                      DESC_HDR_MODE0_AESU_CBC |
2280                                      DESC_HDR_SEL1_MDEUA |
2281                                      DESC_HDR_MODE1_MDEU_INIT |
2282                                      DESC_HDR_MODE1_MDEU_PAD |
2283                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2284         },
2285         {       .type = CRYPTO_ALG_TYPE_AEAD,
2286                 .alg.aead = {
2287                         .base = {
2288                                 .cra_name = "authenc(hmac(sha1),"
2289                                             "cbc(des3_ede))",
2290                                 .cra_driver_name = "authenc-hmac-sha1-"
2291                                                    "cbc-3des-talitos",
2292                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2293                                 .cra_flags = CRYPTO_ALG_ASYNC,
2294                         },
2295                         .ivsize = DES3_EDE_BLOCK_SIZE,
2296                         .maxauthsize = SHA1_DIGEST_SIZE,
2297                 },
2298                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2299                                      DESC_HDR_SEL0_DEU |
2300                                      DESC_HDR_MODE0_DEU_CBC |
2301                                      DESC_HDR_MODE0_DEU_3DES |
2302                                      DESC_HDR_SEL1_MDEUA |
2303                                      DESC_HDR_MODE1_MDEU_INIT |
2304                                      DESC_HDR_MODE1_MDEU_PAD |
2305                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2306         },
2307         {       .type = CRYPTO_ALG_TYPE_AEAD,
2308                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2309                 .alg.aead = {
2310                         .base = {
2311                                 .cra_name = "authenc(hmac(sha1),"
2312                                             "cbc(des3_ede))",
2313                                 .cra_driver_name = "authenc-hmac-sha1-"
2314                                                    "cbc-3des-talitos",
2315                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2316                                 .cra_flags = CRYPTO_ALG_ASYNC,
2317                         },
2318                         .ivsize = DES3_EDE_BLOCK_SIZE,
2319                         .maxauthsize = SHA1_DIGEST_SIZE,
2320                 },
2321                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2322                                      DESC_HDR_SEL0_DEU |
2323                                      DESC_HDR_MODE0_DEU_CBC |
2324                                      DESC_HDR_MODE0_DEU_3DES |
2325                                      DESC_HDR_SEL1_MDEUA |
2326                                      DESC_HDR_MODE1_MDEU_INIT |
2327                                      DESC_HDR_MODE1_MDEU_PAD |
2328                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2329         },
2330         {       .type = CRYPTO_ALG_TYPE_AEAD,
2331                 .alg.aead = {
2332                         .base = {
2333                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2334                                 .cra_driver_name = "authenc-hmac-sha224-"
2335                                                    "cbc-aes-talitos",
2336                                 .cra_blocksize = AES_BLOCK_SIZE,
2337                                 .cra_flags = CRYPTO_ALG_ASYNC,
2338                         },
2339                         .ivsize = AES_BLOCK_SIZE,
2340                         .maxauthsize = SHA224_DIGEST_SIZE,
2341                 },
2342                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2343                                      DESC_HDR_SEL0_AESU |
2344                                      DESC_HDR_MODE0_AESU_CBC |
2345                                      DESC_HDR_SEL1_MDEUA |
2346                                      DESC_HDR_MODE1_MDEU_INIT |
2347                                      DESC_HDR_MODE1_MDEU_PAD |
2348                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2349         },
2350         {       .type = CRYPTO_ALG_TYPE_AEAD,
2351                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2352                 .alg.aead = {
2353                         .base = {
2354                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2355                                 .cra_driver_name = "authenc-hmac-sha224-"
2356                                                    "cbc-aes-talitos",
2357                                 .cra_blocksize = AES_BLOCK_SIZE,
2358                                 .cra_flags = CRYPTO_ALG_ASYNC,
2359                         },
2360                         .ivsize = AES_BLOCK_SIZE,
2361                         .maxauthsize = SHA224_DIGEST_SIZE,
2362                 },
2363                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2364                                      DESC_HDR_SEL0_AESU |
2365                                      DESC_HDR_MODE0_AESU_CBC |
2366                                      DESC_HDR_SEL1_MDEUA |
2367                                      DESC_HDR_MODE1_MDEU_INIT |
2368                                      DESC_HDR_MODE1_MDEU_PAD |
2369                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2370         },
2371         {       .type = CRYPTO_ALG_TYPE_AEAD,
2372                 .alg.aead = {
2373                         .base = {
2374                                 .cra_name = "authenc(hmac(sha224),"
2375                                             "cbc(des3_ede))",
2376                                 .cra_driver_name = "authenc-hmac-sha224-"
2377                                                    "cbc-3des-talitos",
2378                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2379                                 .cra_flags = CRYPTO_ALG_ASYNC,
2380                         },
2381                         .ivsize = DES3_EDE_BLOCK_SIZE,
2382                         .maxauthsize = SHA224_DIGEST_SIZE,
2383                 },
2384                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2385                                      DESC_HDR_SEL0_DEU |
2386                                      DESC_HDR_MODE0_DEU_CBC |
2387                                      DESC_HDR_MODE0_DEU_3DES |
2388                                      DESC_HDR_SEL1_MDEUA |
2389                                      DESC_HDR_MODE1_MDEU_INIT |
2390                                      DESC_HDR_MODE1_MDEU_PAD |
2391                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2392         },
2393         {       .type = CRYPTO_ALG_TYPE_AEAD,
2394                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2395                 .alg.aead = {
2396                         .base = {
2397                                 .cra_name = "authenc(hmac(sha224),"
2398                                             "cbc(des3_ede))",
2399                                 .cra_driver_name = "authenc-hmac-sha224-"
2400                                                    "cbc-3des-talitos",
2401                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2402                                 .cra_flags = CRYPTO_ALG_ASYNC,
2403                         },
2404                         .ivsize = DES3_EDE_BLOCK_SIZE,
2405                         .maxauthsize = SHA224_DIGEST_SIZE,
2406                 },
2407                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2408                                      DESC_HDR_SEL0_DEU |
2409                                      DESC_HDR_MODE0_DEU_CBC |
2410                                      DESC_HDR_MODE0_DEU_3DES |
2411                                      DESC_HDR_SEL1_MDEUA |
2412                                      DESC_HDR_MODE1_MDEU_INIT |
2413                                      DESC_HDR_MODE1_MDEU_PAD |
2414                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2415         },
2416         {       .type = CRYPTO_ALG_TYPE_AEAD,
2417                 .alg.aead = {
2418                         .base = {
2419                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2420                                 .cra_driver_name = "authenc-hmac-sha256-"
2421                                                    "cbc-aes-talitos",
2422                                 .cra_blocksize = AES_BLOCK_SIZE,
2423                                 .cra_flags = CRYPTO_ALG_ASYNC,
2424                         },
2425                         .ivsize = AES_BLOCK_SIZE,
2426                         .maxauthsize = SHA256_DIGEST_SIZE,
2427                 },
2428                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2429                                      DESC_HDR_SEL0_AESU |
2430                                      DESC_HDR_MODE0_AESU_CBC |
2431                                      DESC_HDR_SEL1_MDEUA |
2432                                      DESC_HDR_MODE1_MDEU_INIT |
2433                                      DESC_HDR_MODE1_MDEU_PAD |
2434                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2435         },
2436         {       .type = CRYPTO_ALG_TYPE_AEAD,
2437                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2438                 .alg.aead = {
2439                         .base = {
2440                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2441                                 .cra_driver_name = "authenc-hmac-sha256-"
2442                                                    "cbc-aes-talitos",
2443                                 .cra_blocksize = AES_BLOCK_SIZE,
2444                                 .cra_flags = CRYPTO_ALG_ASYNC,
2445                         },
2446                         .ivsize = AES_BLOCK_SIZE,
2447                         .maxauthsize = SHA256_DIGEST_SIZE,
2448                 },
2449                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2450                                      DESC_HDR_SEL0_AESU |
2451                                      DESC_HDR_MODE0_AESU_CBC |
2452                                      DESC_HDR_SEL1_MDEUA |
2453                                      DESC_HDR_MODE1_MDEU_INIT |
2454                                      DESC_HDR_MODE1_MDEU_PAD |
2455                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2456         },
2457         {       .type = CRYPTO_ALG_TYPE_AEAD,
2458                 .alg.aead = {
2459                         .base = {
2460                                 .cra_name = "authenc(hmac(sha256),"
2461                                             "cbc(des3_ede))",
2462                                 .cra_driver_name = "authenc-hmac-sha256-"
2463                                                    "cbc-3des-talitos",
2464                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2465                                 .cra_flags = CRYPTO_ALG_ASYNC,
2466                         },
2467                         .ivsize = DES3_EDE_BLOCK_SIZE,
2468                         .maxauthsize = SHA256_DIGEST_SIZE,
2469                 },
2470                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2471                                      DESC_HDR_SEL0_DEU |
2472                                      DESC_HDR_MODE0_DEU_CBC |
2473                                      DESC_HDR_MODE0_DEU_3DES |
2474                                      DESC_HDR_SEL1_MDEUA |
2475                                      DESC_HDR_MODE1_MDEU_INIT |
2476                                      DESC_HDR_MODE1_MDEU_PAD |
2477                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2478         },
2479         {       .type = CRYPTO_ALG_TYPE_AEAD,
2480                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2481                 .alg.aead = {
2482                         .base = {
2483                                 .cra_name = "authenc(hmac(sha256),"
2484                                             "cbc(des3_ede))",
2485                                 .cra_driver_name = "authenc-hmac-sha256-"
2486                                                    "cbc-3des-talitos",
2487                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2488                                 .cra_flags = CRYPTO_ALG_ASYNC,
2489                         },
2490                         .ivsize = DES3_EDE_BLOCK_SIZE,
2491                         .maxauthsize = SHA256_DIGEST_SIZE,
2492                 },
2493                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2494                                      DESC_HDR_SEL0_DEU |
2495                                      DESC_HDR_MODE0_DEU_CBC |
2496                                      DESC_HDR_MODE0_DEU_3DES |
2497                                      DESC_HDR_SEL1_MDEUA |
2498                                      DESC_HDR_MODE1_MDEU_INIT |
2499                                      DESC_HDR_MODE1_MDEU_PAD |
2500                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2501         },
2502         {       .type = CRYPTO_ALG_TYPE_AEAD,
2503                 .alg.aead = {
2504                         .base = {
2505                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2506                                 .cra_driver_name = "authenc-hmac-sha384-"
2507                                                    "cbc-aes-talitos",
2508                                 .cra_blocksize = AES_BLOCK_SIZE,
2509                                 .cra_flags = CRYPTO_ALG_ASYNC,
2510                         },
2511                         .ivsize = AES_BLOCK_SIZE,
2512                         .maxauthsize = SHA384_DIGEST_SIZE,
2513                 },
2514                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2515                                      DESC_HDR_SEL0_AESU |
2516                                      DESC_HDR_MODE0_AESU_CBC |
2517                                      DESC_HDR_SEL1_MDEUB |
2518                                      DESC_HDR_MODE1_MDEU_INIT |
2519                                      DESC_HDR_MODE1_MDEU_PAD |
2520                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2521         },
2522         {       .type = CRYPTO_ALG_TYPE_AEAD,
2523                 .alg.aead = {
2524                         .base = {
2525                                 .cra_name = "authenc(hmac(sha384),"
2526                                             "cbc(des3_ede))",
2527                                 .cra_driver_name = "authenc-hmac-sha384-"
2528                                                    "cbc-3des-talitos",
2529                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2530                                 .cra_flags = CRYPTO_ALG_ASYNC,
2531                         },
2532                         .ivsize = DES3_EDE_BLOCK_SIZE,
2533                         .maxauthsize = SHA384_DIGEST_SIZE,
2534                 },
2535                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2536                                      DESC_HDR_SEL0_DEU |
2537                                      DESC_HDR_MODE0_DEU_CBC |
2538                                      DESC_HDR_MODE0_DEU_3DES |
2539                                      DESC_HDR_SEL1_MDEUB |
2540                                      DESC_HDR_MODE1_MDEU_INIT |
2541                                      DESC_HDR_MODE1_MDEU_PAD |
2542                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2543         },
2544         {       .type = CRYPTO_ALG_TYPE_AEAD,
2545                 .alg.aead = {
2546                         .base = {
2547                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2548                                 .cra_driver_name = "authenc-hmac-sha512-"
2549                                                    "cbc-aes-talitos",
2550                                 .cra_blocksize = AES_BLOCK_SIZE,
2551                                 .cra_flags = CRYPTO_ALG_ASYNC,
2552                         },
2553                         .ivsize = AES_BLOCK_SIZE,
2554                         .maxauthsize = SHA512_DIGEST_SIZE,
2555                 },
2556                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2557                                      DESC_HDR_SEL0_AESU |
2558                                      DESC_HDR_MODE0_AESU_CBC |
2559                                      DESC_HDR_SEL1_MDEUB |
2560                                      DESC_HDR_MODE1_MDEU_INIT |
2561                                      DESC_HDR_MODE1_MDEU_PAD |
2562                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2563         },
2564         {       .type = CRYPTO_ALG_TYPE_AEAD,
2565                 .alg.aead = {
2566                         .base = {
2567                                 .cra_name = "authenc(hmac(sha512),"
2568                                             "cbc(des3_ede))",
2569                                 .cra_driver_name = "authenc-hmac-sha512-"
2570                                                    "cbc-3des-talitos",
2571                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2572                                 .cra_flags = CRYPTO_ALG_ASYNC,
2573                         },
2574                         .ivsize = DES3_EDE_BLOCK_SIZE,
2575                         .maxauthsize = SHA512_DIGEST_SIZE,
2576                 },
2577                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2578                                      DESC_HDR_SEL0_DEU |
2579                                      DESC_HDR_MODE0_DEU_CBC |
2580                                      DESC_HDR_MODE0_DEU_3DES |
2581                                      DESC_HDR_SEL1_MDEUB |
2582                                      DESC_HDR_MODE1_MDEU_INIT |
2583                                      DESC_HDR_MODE1_MDEU_PAD |
2584                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2585         },
2586         {       .type = CRYPTO_ALG_TYPE_AEAD,
2587                 .alg.aead = {
2588                         .base = {
2589                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2590                                 .cra_driver_name = "authenc-hmac-md5-"
2591                                                    "cbc-aes-talitos",
2592                                 .cra_blocksize = AES_BLOCK_SIZE,
2593                                 .cra_flags = CRYPTO_ALG_ASYNC,
2594                         },
2595                         .ivsize = AES_BLOCK_SIZE,
2596                         .maxauthsize = MD5_DIGEST_SIZE,
2597                 },
2598                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599                                      DESC_HDR_SEL0_AESU |
2600                                      DESC_HDR_MODE0_AESU_CBC |
2601                                      DESC_HDR_SEL1_MDEUA |
2602                                      DESC_HDR_MODE1_MDEU_INIT |
2603                                      DESC_HDR_MODE1_MDEU_PAD |
2604                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2605         },
2606         {       .type = CRYPTO_ALG_TYPE_AEAD,
2607                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2608                 .alg.aead = {
2609                         .base = {
2610                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2611                                 .cra_driver_name = "authenc-hmac-md5-"
2612                                                    "cbc-aes-talitos",
2613                                 .cra_blocksize = AES_BLOCK_SIZE,
2614                                 .cra_flags = CRYPTO_ALG_ASYNC,
2615                         },
2616                         .ivsize = AES_BLOCK_SIZE,
2617                         .maxauthsize = MD5_DIGEST_SIZE,
2618                 },
2619                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2620                                      DESC_HDR_SEL0_AESU |
2621                                      DESC_HDR_MODE0_AESU_CBC |
2622                                      DESC_HDR_SEL1_MDEUA |
2623                                      DESC_HDR_MODE1_MDEU_INIT |
2624                                      DESC_HDR_MODE1_MDEU_PAD |
2625                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2626         },
2627         {       .type = CRYPTO_ALG_TYPE_AEAD,
2628                 .alg.aead = {
2629                         .base = {
2630                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2631                                 .cra_driver_name = "authenc-hmac-md5-"
2632                                                    "cbc-3des-talitos",
2633                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2634                                 .cra_flags = CRYPTO_ALG_ASYNC,
2635                         },
2636                         .ivsize = DES3_EDE_BLOCK_SIZE,
2637                         .maxauthsize = MD5_DIGEST_SIZE,
2638                 },
2639                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2640                                      DESC_HDR_SEL0_DEU |
2641                                      DESC_HDR_MODE0_DEU_CBC |
2642                                      DESC_HDR_MODE0_DEU_3DES |
2643                                      DESC_HDR_SEL1_MDEUA |
2644                                      DESC_HDR_MODE1_MDEU_INIT |
2645                                      DESC_HDR_MODE1_MDEU_PAD |
2646                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2647         },
2648         {       .type = CRYPTO_ALG_TYPE_AEAD,
2649                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2650                 .alg.aead = {
2651                         .base = {
2652                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2653                                 .cra_driver_name = "authenc-hmac-md5-"
2654                                                    "cbc-3des-talitos",
2655                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2656                                 .cra_flags = CRYPTO_ALG_ASYNC,
2657                         },
2658                         .ivsize = DES3_EDE_BLOCK_SIZE,
2659                         .maxauthsize = MD5_DIGEST_SIZE,
2660                 },
2661                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2662                                      DESC_HDR_SEL0_DEU |
2663                                      DESC_HDR_MODE0_DEU_CBC |
2664                                      DESC_HDR_MODE0_DEU_3DES |
2665                                      DESC_HDR_SEL1_MDEUA |
2666                                      DESC_HDR_MODE1_MDEU_INIT |
2667                                      DESC_HDR_MODE1_MDEU_PAD |
2668                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2669         },
2670         /* ABLKCIPHER algorithms. */
2671         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2672                 .alg.crypto = {
2673                         .cra_name = "ecb(aes)",
2674                         .cra_driver_name = "ecb-aes-talitos",
2675                         .cra_blocksize = AES_BLOCK_SIZE,
2676                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2677                                      CRYPTO_ALG_ASYNC,
2678                         .cra_ablkcipher = {
2679                                 .min_keysize = AES_MIN_KEY_SIZE,
2680                                 .max_keysize = AES_MAX_KEY_SIZE,
2682                         }
2683                 },
2684                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2685                                      DESC_HDR_SEL0_AESU,
2686         },
2687         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2688                 .alg.crypto = {
2689                         .cra_name = "cbc(aes)",
2690                         .cra_driver_name = "cbc-aes-talitos",
2691                         .cra_blocksize = AES_BLOCK_SIZE,
2692                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2693                                      CRYPTO_ALG_ASYNC,
2694                         .cra_ablkcipher = {
2695                                 .min_keysize = AES_MIN_KEY_SIZE,
2696                                 .max_keysize = AES_MAX_KEY_SIZE,
2697                                 .ivsize = AES_BLOCK_SIZE,
2698                         }
2699                 },
2700                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2701                                      DESC_HDR_SEL0_AESU |
2702                                      DESC_HDR_MODE0_AESU_CBC,
2703         },
2704         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2705                 .alg.crypto = {
2706                         .cra_name = "ctr(aes)",
2707                         .cra_driver_name = "ctr-aes-talitos",
2708                         .cra_blocksize = 1,
2709                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2710                                      CRYPTO_ALG_ASYNC,
2711                         .cra_ablkcipher = {
2712                                 .min_keysize = AES_MIN_KEY_SIZE,
2713                                 .max_keysize = AES_MAX_KEY_SIZE,
2714                                 .ivsize = AES_BLOCK_SIZE,
2715                         }
2716                 },
2717                 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2718                                      DESC_HDR_SEL0_AESU |
2719                                      DESC_HDR_MODE0_AESU_CTR,
2720         },
2721         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2722                 .alg.crypto = {
2723                         .cra_name = "ecb(des)",
2724                         .cra_driver_name = "ecb-des-talitos",
2725                         .cra_blocksize = DES_BLOCK_SIZE,
2726                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2727                                      CRYPTO_ALG_ASYNC,
2728                         .cra_ablkcipher = {
2729                                 .min_keysize = DES_KEY_SIZE,
2730                                 .max_keysize = DES_KEY_SIZE,
2732                         }
2733                 },
2734                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2735                                      DESC_HDR_SEL0_DEU,
2736         },
2737         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2738                 .alg.crypto = {
2739                         .cra_name = "cbc(des)",
2740                         .cra_driver_name = "cbc-des-talitos",
2741                         .cra_blocksize = DES_BLOCK_SIZE,
2742                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2743                                      CRYPTO_ALG_ASYNC,
2744                         .cra_ablkcipher = {
2745                                 .min_keysize = DES_KEY_SIZE,
2746                                 .max_keysize = DES_KEY_SIZE,
2747                                 .ivsize = DES_BLOCK_SIZE,
2748                         }
2749                 },
2750                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2751                                      DESC_HDR_SEL0_DEU |
2752                                      DESC_HDR_MODE0_DEU_CBC,
2753         },
2754         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2755                 .alg.crypto = {
2756                         .cra_name = "ecb(des3_ede)",
2757                         .cra_driver_name = "ecb-3des-talitos",
2758                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2759                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2760                                      CRYPTO_ALG_ASYNC,
2761                         .cra_ablkcipher = {
2762                                 .min_keysize = DES3_EDE_KEY_SIZE,
2763                                 .max_keysize = DES3_EDE_KEY_SIZE,
2765                         }
2766                 },
2767                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2768                                      DESC_HDR_SEL0_DEU |
2769                                      DESC_HDR_MODE0_DEU_3DES,
2770         },
2771         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2772                 .alg.crypto = {
2773                         .cra_name = "cbc(des3_ede)",
2774                         .cra_driver_name = "cbc-3des-talitos",
2775                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2776                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2777                                      CRYPTO_ALG_ASYNC,
2778                         .cra_ablkcipher = {
2779                                 .min_keysize = DES3_EDE_KEY_SIZE,
2780                                 .max_keysize = DES3_EDE_KEY_SIZE,
2781                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2782                         }
2783                 },
2784                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2785                                      DESC_HDR_SEL0_DEU |
2786                                      DESC_HDR_MODE0_DEU_CBC |
2787                                      DESC_HDR_MODE0_DEU_3DES,
2788         },
2789         /* AHASH algorithms. */
2790         {       .type = CRYPTO_ALG_TYPE_AHASH,
2791                 .alg.hash = {
2792                         .halg.digestsize = MD5_DIGEST_SIZE,
2793                         .halg.statesize = sizeof(struct talitos_export_state),
2794                         .halg.base = {
2795                                 .cra_name = "md5",
2796                                 .cra_driver_name = "md5-talitos",
2797                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2798                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2799                                              CRYPTO_ALG_ASYNC,
2800                         }
2801                 },
2802                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2803                                      DESC_HDR_SEL0_MDEUA |
2804                                      DESC_HDR_MODE0_MDEU_MD5,
2805         },
2806         {       .type = CRYPTO_ALG_TYPE_AHASH,
2807                 .alg.hash = {
2808                         .halg.digestsize = SHA1_DIGEST_SIZE,
2809                         .halg.statesize = sizeof(struct talitos_export_state),
2810                         .halg.base = {
2811                                 .cra_name = "sha1",
2812                                 .cra_driver_name = "sha1-talitos",
2813                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2814                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2815                                              CRYPTO_ALG_ASYNC,
2816                         }
2817                 },
2818                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2819                                      DESC_HDR_SEL0_MDEUA |
2820                                      DESC_HDR_MODE0_MDEU_SHA1,
2821         },
2822         {       .type = CRYPTO_ALG_TYPE_AHASH,
2823                 .alg.hash = {
2824                         .halg.digestsize = SHA224_DIGEST_SIZE,
2825                         .halg.statesize = sizeof(struct talitos_export_state),
2826                         .halg.base = {
2827                                 .cra_name = "sha224",
2828                                 .cra_driver_name = "sha224-talitos",
2829                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2830                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2831                                              CRYPTO_ALG_ASYNC,
2832                         }
2833                 },
2834                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2835                                      DESC_HDR_SEL0_MDEUA |
2836                                      DESC_HDR_MODE0_MDEU_SHA224,
2837         },
2838         {       .type = CRYPTO_ALG_TYPE_AHASH,
2839                 .alg.hash = {
2840                         .halg.digestsize = SHA256_DIGEST_SIZE,
2841                         .halg.statesize = sizeof(struct talitos_export_state),
2842                         .halg.base = {
2843                                 .cra_name = "sha256",
2844                                 .cra_driver_name = "sha256-talitos",
2845                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2846                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2847                                              CRYPTO_ALG_ASYNC,
2848                         }
2849                 },
2850                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2851                                      DESC_HDR_SEL0_MDEUA |
2852                                      DESC_HDR_MODE0_MDEU_SHA256,
2853         },
2854         {       .type = CRYPTO_ALG_TYPE_AHASH,
2855                 .alg.hash = {
2856                         .halg.digestsize = SHA384_DIGEST_SIZE,
2857                         .halg.statesize = sizeof(struct talitos_export_state),
2858                         .halg.base = {
2859                                 .cra_name = "sha384",
2860                                 .cra_driver_name = "sha384-talitos",
2861                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2862                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2863                                              CRYPTO_ALG_ASYNC,
2864                         }
2865                 },
2866                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2867                                      DESC_HDR_SEL0_MDEUB |
2868                                      DESC_HDR_MODE0_MDEUB_SHA384,
2869         },
2870         {       .type = CRYPTO_ALG_TYPE_AHASH,
2871                 .alg.hash = {
2872                         .halg.digestsize = SHA512_DIGEST_SIZE,
2873                         .halg.statesize = sizeof(struct talitos_export_state),
2874                         .halg.base = {
2875                                 .cra_name = "sha512",
2876                                 .cra_driver_name = "sha512-talitos",
2877                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2878                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2879                                              CRYPTO_ALG_ASYNC,
2880                         }
2881                 },
2882                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2883                                      DESC_HDR_SEL0_MDEUB |
2884                                      DESC_HDR_MODE0_MDEUB_SHA512,
2885         },
2886         {       .type = CRYPTO_ALG_TYPE_AHASH,
2887                 .alg.hash = {
2888                         .halg.digestsize = MD5_DIGEST_SIZE,
2889                         .halg.statesize = sizeof(struct talitos_export_state),
2890                         .halg.base = {
2891                                 .cra_name = "hmac(md5)",
2892                                 .cra_driver_name = "hmac-md5-talitos",
2893                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2894                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2895                                              CRYPTO_ALG_ASYNC,
2896                         }
2897                 },
2898                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2899                                      DESC_HDR_SEL0_MDEUA |
2900                                      DESC_HDR_MODE0_MDEU_MD5,
2901         },
2902         {       .type = CRYPTO_ALG_TYPE_AHASH,
2903                 .alg.hash = {
2904                         .halg.digestsize = SHA1_DIGEST_SIZE,
2905                         .halg.statesize = sizeof(struct talitos_export_state),
2906                         .halg.base = {
2907                                 .cra_name = "hmac(sha1)",
2908                                 .cra_driver_name = "hmac-sha1-talitos",
2909                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2910                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2911                                              CRYPTO_ALG_ASYNC,
2912                         }
2913                 },
2914                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2915                                      DESC_HDR_SEL0_MDEUA |
2916                                      DESC_HDR_MODE0_MDEU_SHA1,
2917         },
2918         {       .type = CRYPTO_ALG_TYPE_AHASH,
2919                 .alg.hash = {
2920                         .halg.digestsize = SHA224_DIGEST_SIZE,
2921                         .halg.statesize = sizeof(struct talitos_export_state),
2922                         .halg.base = {
2923                                 .cra_name = "hmac(sha224)",
2924                                 .cra_driver_name = "hmac-sha224-talitos",
2925                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2926                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2927                                              CRYPTO_ALG_ASYNC,
2928                         }
2929                 },
2930                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2931                                      DESC_HDR_SEL0_MDEUA |
2932                                      DESC_HDR_MODE0_MDEU_SHA224,
2933         },
2934         {       .type = CRYPTO_ALG_TYPE_AHASH,
2935                 .alg.hash = {
2936                         .halg.digestsize = SHA256_DIGEST_SIZE,
2937                         .halg.statesize = sizeof(struct talitos_export_state),
2938                         .halg.base = {
2939                                 .cra_name = "hmac(sha256)",
2940                                 .cra_driver_name = "hmac-sha256-talitos",
2941                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2942                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2943                                              CRYPTO_ALG_ASYNC,
2944                         }
2945                 },
2946                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2947                                      DESC_HDR_SEL0_MDEUA |
2948                                      DESC_HDR_MODE0_MDEU_SHA256,
2949         },
2950         {       .type = CRYPTO_ALG_TYPE_AHASH,
2951                 .alg.hash = {
2952                         .halg.digestsize = SHA384_DIGEST_SIZE,
2953                         .halg.statesize = sizeof(struct talitos_export_state),
2954                         .halg.base = {
2955                                 .cra_name = "hmac(sha384)",
2956                                 .cra_driver_name = "hmac-sha384-talitos",
2957                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2958                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2959                                              CRYPTO_ALG_ASYNC,
2960                         }
2961                 },
2962                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2963                                      DESC_HDR_SEL0_MDEUB |
2964                                      DESC_HDR_MODE0_MDEUB_SHA384,
2965         },
2966         {       .type = CRYPTO_ALG_TYPE_AHASH,
2967                 .alg.hash = {
2968                         .halg.digestsize = SHA512_DIGEST_SIZE,
2969                         .halg.statesize = sizeof(struct talitos_export_state),
2970                         .halg.base = {
2971                                 .cra_name = "hmac(sha512)",
2972                                 .cra_driver_name = "hmac-sha512-talitos",
2973                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2974                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2975                                              CRYPTO_ALG_ASYNC,
2976                         }
2977                 },
2978                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2979                                      DESC_HDR_SEL0_MDEUB |
2980                                      DESC_HDR_MODE0_MDEUB_SHA512,
2981         }
2982 };
2983
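/*
 * Per-device instance of one of the algorithm templates above.  It is
 * allocated at probe time, filled in by talitos_alg_alloc() and linked
 * into the device's alg_list so it can be unregistered again on remove.
 */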
2984 struct talitos_crypto_alg {
2985         struct list_head entry;
2986         struct device *dev;
2987         struct talitos_alg_template algt;
2988 };
2989
2990 static int talitos_init_common(struct talitos_ctx *ctx,
2991                                struct talitos_crypto_alg *talitos_alg)
2992 {
2993         struct talitos_private *priv;
2994
2995         /* update context with ptr to dev */
2996         ctx->dev = talitos_alg->dev;
2997
2998         /* assign SEC channel to tfm in round-robin fashion */
2999         priv = dev_get_drvdata(ctx->dev);
3000         ctx->ch = atomic_inc_return(&priv->last_chan) &
3001                   (priv->num_channels - 1);
3002
3003         /* copy descriptor header template value */
3004         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3005
3006         /* select done notification */
3007         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3008
3009         return 0;
3010 }
3011
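/*
 * cra_init hook shared by the ablkcipher and ahash algorithms: recover the
 * enclosing talitos_crypto_alg from the generic crypto_alg (the container
 * differs for hashes) and perform the common per-tfm setup.
 */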
3012 static int talitos_cra_init(struct crypto_tfm *tfm)
3013 {
3014         struct crypto_alg *alg = tfm->__crt_alg;
3015         struct talitos_crypto_alg *talitos_alg;
3016         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3017
3018         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3019                 talitos_alg = container_of(__crypto_ahash_alg(alg),
3020                                            struct talitos_crypto_alg,
3021                                            algt.alg.hash);
3022         else
3023                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3024                                            algt.alg.crypto);
3025
3026         return talitos_init_common(ctx, talitos_alg);
3027 }
3028
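/*
 * AEAD transforms are initialized through aead_alg's own init hook, so the
 * template is recovered from the aead member rather than the base crypto_alg.
 */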
3029 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3030 {
3031         struct aead_alg *alg = crypto_aead_alg(tfm);
3032         struct talitos_crypto_alg *talitos_alg;
3033         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3034
3035         talitos_alg = container_of(alg, struct talitos_crypto_alg,
3036                                    algt.alg.aead);
3037
3038         return talitos_init_common(ctx, talitos_alg);
3039 }
3040
3041 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3042 {
3043         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3044
3045         talitos_cra_init(tfm);
3046
3047         ctx->keylen = 0;
3048         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3049                                  sizeof(struct talitos_ahash_req_ctx));
3050
3051         return 0;
3052 }
3053
3054 static void talitos_cra_exit(struct crypto_tfm *tfm)
3055 {
3056         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3057         struct device *dev = ctx->dev;
3058
3059         if (ctx->keylen)
3060                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3061 }
3062
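/*
 * Besides the key, hashes may have a hardware context and an intermediate
 * buffer DMA-mapped; unmap them using the context size that matches the
 * digest width.
 */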
3063 static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
3064 {
3065         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3066         struct device *dev = ctx->dev;
3067         unsigned int size;
3068
3069         talitos_cra_exit(tfm);
3070
3071         size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
3072                 SHA256_DIGEST_SIZE)
3073                ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
3074                : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
3075
3076         if (ctx->dma_hw_context)
3077                 dma_unmap_single(dev, ctx->dma_hw_context, size,
3078                                  DMA_BIDIRECTIONAL);
3079         if (ctx->dma_buf)
3080                 dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
3081                                  DMA_TO_DEVICE);
3082 }
3083
3084 /*
3085  * given the alg's descriptor header template, determine whether descriptor
3086  * type and primary/secondary execution units required match the hw
3087  * capabilities description provided in the device tree node.
3088  */
3089 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3090 {
3091         struct talitos_private *priv = dev_get_drvdata(dev);
3092         int ret;
3093
3094         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3095               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3096
3097         if (SECONDARY_EU(desc_hdr_template))
3098                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3099                               & priv->exec_units);
3100
3101         return ret;
3102 }
3103
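/*
 * Undo everything talitos_probe() set up: unregister the crypto algorithms
 * and the hwrng, release the interrupt lines and kill the done tasklets.
 */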
3104 static int talitos_remove(struct platform_device *ofdev)
3105 {
3106         struct device *dev = &ofdev->dev;
3107         struct talitos_private *priv = dev_get_drvdata(dev);
3108         struct talitos_crypto_alg *t_alg, *n;
3109         int i;
3110
3111         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3112                 switch (t_alg->algt.type) {
3113                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
3114                         break;
3115                 case CRYPTO_ALG_TYPE_AEAD:
3116                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
3117                 case CRYPTO_ALG_TYPE_AHASH:
3118                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
3119                         break;
3120                 }
3121                 list_del(&t_alg->entry);
3122         }
3123
3124         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3125                 talitos_unregister_rng(dev);
3126
3127         for (i = 0; i < 2; i++)
3128                 if (priv->irq[i]) {
3129                         free_irq(priv->irq[i], dev);
3130                         irq_dispose_mapping(priv->irq[i]);
3131                 }
3132
3133         tasklet_kill(&priv->done_task[0]);
3134         if (priv->irq[1])
3135                 tasklet_kill(&priv->done_task[1]);
3136
3137         return 0;
3138 }
3139
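/*
 * Instantiate one of the static templates for this device and wire up the
 * type-specific callbacks.  Returns -ENOTSUPP when the template needs a
 * hardware feature this SEC revision lacks (e.g. HMAC support).
 */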
3140 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3141                                                     struct talitos_alg_template
3142                                                            *template)
3143 {
3144         struct talitos_private *priv = dev_get_drvdata(dev);
3145         struct talitos_crypto_alg *t_alg;
3146         struct crypto_alg *alg;
3147
3148         t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3149                              GFP_KERNEL);
3150         if (!t_alg)
3151                 return ERR_PTR(-ENOMEM);
3152
3153         t_alg->algt = *template;
3154
3155         switch (t_alg->algt.type) {
3156         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3157                 alg = &t_alg->algt.alg.crypto;
3158                 alg->cra_init = talitos_cra_init;
3159                 alg->cra_exit = talitos_cra_exit;
3160                 alg->cra_type = &crypto_ablkcipher_type;
3161                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3162                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3163                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3164                 alg->cra_ablkcipher.geniv = "eseqiv";
3165                 break;
3166         case CRYPTO_ALG_TYPE_AEAD:
3167                 alg = &t_alg->algt.alg.aead.base;
3168                 alg->cra_exit = talitos_cra_exit;
3169                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3170                 t_alg->algt.alg.aead.setkey = aead_setkey;
3171                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3172                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3173                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3174                     !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3175                         devm_kfree(dev, t_alg);
3176                         return ERR_PTR(-ENOTSUPP);
3177                 }
3178                 break;
3179         case CRYPTO_ALG_TYPE_AHASH:
3180                 alg = &t_alg->algt.alg.hash.halg.base;
3181                 alg->cra_init = talitos_cra_init_ahash;
3182                 alg->cra_exit = talitos_cra_exit_ahash;
3183                 alg->cra_type = &crypto_ahash_type;
3184                 t_alg->algt.alg.hash.init = ahash_init;
3185                 t_alg->algt.alg.hash.update = ahash_update;
3186                 t_alg->algt.alg.hash.final = ahash_final;
3187                 t_alg->algt.alg.hash.finup = ahash_finup;
3188                 t_alg->algt.alg.hash.digest = ahash_digest;
3189                 if (!strncmp(alg->cra_name, "hmac", 4))
3190                         t_alg->algt.alg.hash.setkey = ahash_setkey;
3191                 t_alg->algt.alg.hash.import = ahash_import;
3192                 t_alg->algt.alg.hash.export = ahash_export;
3193
3194                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3195                     !strncmp(alg->cra_name, "hmac", 4)) {
3196                         devm_kfree(dev, t_alg);
3197                         return ERR_PTR(-ENOTSUPP);
3198                 }
3199                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3200                     (!strcmp(alg->cra_name, "sha224") ||
3201                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
3202                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3203                         t_alg->algt.desc_hdr_template =
3204                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3205                                         DESC_HDR_SEL0_MDEUA |
3206                                         DESC_HDR_MODE0_MDEU_SHA256;
3207                 }
3208                 break;
3209         default:
3210                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3211                 devm_kfree(dev, t_alg);
3212                 return ERR_PTR(-EINVAL);
3213         }
3214
3215         alg->cra_module = THIS_MODULE;
3216         if (t_alg->algt.priority)
3217                 alg->cra_priority = t_alg->algt.priority;
3218         else
3219                 alg->cra_priority = TALITOS_CRA_PRIORITY;
3220         alg->cra_alignmask = 0;
3221         alg->cra_ctxsize = sizeof(struct talitos_ctx);
3222         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3223
3224         t_alg->dev = dev;
3225
3226         return t_alg;
3227 }
3228
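/*
 * Map and request the controller interrupt(s).  SEC1 and single-IRQ SEC2+
 * devices use one line for all channels; when a second IRQ is provided,
 * channels 0/2 and 1/3 are split across the two lines.
 */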
3229 static int talitos_probe_irq(struct platform_device *ofdev)
3230 {
3231         struct device *dev = &ofdev->dev;
3232         struct device_node *np = ofdev->dev.of_node;
3233         struct talitos_private *priv = dev_get_drvdata(dev);
3234         int err;
3235         bool is_sec1 = has_ftr_sec1(priv);
3236
3237         priv->irq[0] = irq_of_parse_and_map(np, 0);
3238         if (!priv->irq[0]) {
3239                 dev_err(dev, "failed to map irq\n");
3240                 return -EINVAL;
3241         }
3242         if (is_sec1) {
3243                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3244                                   dev_driver_string(dev), dev);
3245                 goto primary_out;
3246         }
3247
3248         priv->irq[1] = irq_of_parse_and_map(np, 1);
3249
3250         /* get the primary irq line */
3251         if (!priv->irq[1]) {
3252                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3253                                   dev_driver_string(dev), dev);
3254                 goto primary_out;
3255         }
3256
3257         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3258                           dev_driver_string(dev), dev);
3259         if (err)
3260                 goto primary_out;
3261
3262         /* get the secondary irq line */
3263         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3264                           dev_driver_string(dev), dev);
3265         if (err) {
3266                 dev_err(dev, "failed to request secondary irq\n");
3267                 irq_dispose_mapping(priv->irq[1]);
3268                 priv->irq[1] = 0;
3269         }
3270
3271         return err;
3272
3273 primary_out:
3274         if (err) {
3275                 dev_err(dev, "failed to request primary irq\n");
3276                 irq_dispose_mapping(priv->irq[0]);
3277                 priv->irq[0] = 0;
3278         }
3279
3280         return err;
3281 }
3282
3283 static int talitos_probe(struct platform_device *ofdev)
3284 {
3285         struct device *dev = &ofdev->dev;
3286         struct device_node *np = ofdev->dev.of_node;
3287         struct talitos_private *priv;
3288         int i, err;
3289         int stride;
3290         struct resource *res;
3291
3292         priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3293         if (!priv)
3294                 return -ENOMEM;
3295
3296         INIT_LIST_HEAD(&priv->alg_list);
3297
3298         dev_set_drvdata(dev, priv);
3299
3300         priv->ofdev = ofdev;
3301
3302         spin_lock_init(&priv->reg_lock);
3303
3304         res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3305         if (!res)
3306                 return -ENXIO;
3307         priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3308         if (!priv->reg) {
3309                 dev_err(dev, "failed to ioremap\n");
3310                 err = -ENOMEM;
3311                 goto err_out;
3312         }
3313
3314         /* get SEC version capabilities from device tree */
3315         of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3316         of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3317         of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3318         of_property_read_u32(np, "fsl,descriptor-types-mask",
3319                              &priv->desc_types);
3320
3321         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3322             !priv->exec_units || !priv->desc_types) {
3323                 dev_err(dev, "invalid property data in device tree node\n");
3324                 err = -EINVAL;
3325                 goto err_out;
3326         }
3327
3328         if (of_device_is_compatible(np, "fsl,sec3.0"))
3329                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3330
3331         if (of_device_is_compatible(np, "fsl,sec2.1"))
3332                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3333                                   TALITOS_FTR_SHA224_HWINIT |
3334                                   TALITOS_FTR_HMAC_OK;
3335
3336         if (of_device_is_compatible(np, "fsl,sec1.0"))
3337                 priv->features |= TALITOS_FTR_SEC1;
3338
3339         if (of_device_is_compatible(np, "fsl,sec1.2")) {
3340                 priv->reg_deu = priv->reg + TALITOS12_DEU;
3341                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3342                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3343                 stride = TALITOS1_CH_STRIDE;
3344         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3345                 priv->reg_deu = priv->reg + TALITOS10_DEU;
3346                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3347                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3348                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3349                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3350                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3351                 stride = TALITOS1_CH_STRIDE;
3352         } else {
3353                 priv->reg_deu = priv->reg + TALITOS2_DEU;
3354                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3355                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3356                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3357                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3358                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3359                 priv->reg_keu = priv->reg + TALITOS2_KEU;
3360                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3361                 stride = TALITOS2_CH_STRIDE;
3362         }
3363
3364         err = talitos_probe_irq(ofdev);
3365         if (err)
3366                 goto err_out;
3367
3368         if (of_device_is_compatible(np, "fsl,sec1.0")) {
3369                 if (priv->num_channels == 1)
3370                         tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3371                                      (unsigned long)dev);
3372                 else
3373                         tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3374                                      (unsigned long)dev);
3375         } else {
3376                 if (priv->irq[1]) {
3377                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3378                                      (unsigned long)dev);
3379                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3380                                      (unsigned long)dev);
3381                 } else if (priv->num_channels == 1) {
3382                         tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3383                                      (unsigned long)dev);
3384                 } else {
3385                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3386                                      (unsigned long)dev);
3387                 }
3388         }
3389
3390         priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
3391                                        priv->num_channels, GFP_KERNEL);
3392         if (!priv->chan) {
3393                 dev_err(dev, "failed to allocate channel management space\n");
3394                 err = -ENOMEM;
3395                 goto err_out;
3396         }
3397
3398         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3399
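             /* set up the per-channel register window, locks and request fifo */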
3400         for (i = 0; i < priv->num_channels; i++) {
3401                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3402                 if (!priv->irq[1] || !(i & 1))
3403                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3404
3405                 spin_lock_init(&priv->chan[i].head_lock);
3406                 spin_lock_init(&priv->chan[i].tail_lock);
3407
3408                 priv->chan[i].fifo = devm_kzalloc(dev,
3409                                                 sizeof(struct talitos_request) *
3410                                                 priv->fifo_len, GFP_KERNEL);
3411                 if (!priv->chan[i].fifo) {
3412                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3413                         err = -ENOMEM;
3414                         goto err_out;
3415                 }
3416
3417                 atomic_set(&priv->chan[i].submit_count,
3418                            -(priv->chfifo_len - 1));
3419         }
3420
3421         dma_set_mask(dev, DMA_BIT_MASK(36));
3422
3423         /* reset and initialize the h/w */
3424         err = init_device(dev);
3425         if (err) {
3426                 dev_err(dev, "failed to initialize device\n");
3427                 goto err_out;
3428         }
3429
3430         /* register the RNG, if available */
3431         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3432                 err = talitos_register_rng(dev);
3433                 if (err) {
3434                         dev_err(dev, "failed to register hwrng: %d\n", err);
3435                         goto err_out;
3436                 } else
3437                         dev_info(dev, "hwrng\n");
3438         }
3439
3440         /* register crypto algorithms the device supports */
3441         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3442                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3443                         struct talitos_crypto_alg *t_alg;
3444                         struct crypto_alg *alg = NULL;
3445
3446                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3447                         if (IS_ERR(t_alg)) {
3448                                 err = PTR_ERR(t_alg);
3449                                 if (err == -ENOTSUPP)
3450                                         continue;
3451                                 goto err_out;
3452                         }
3453
3454                         switch (t_alg->algt.type) {
3455                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3456                                 err = crypto_register_alg(
3457                                                 &t_alg->algt.alg.crypto);
3458                                 alg = &t_alg->algt.alg.crypto;
3459                                 break;
3460
3461                         case CRYPTO_ALG_TYPE_AEAD:
3462                                 err = crypto_register_aead(
3463                                         &t_alg->algt.alg.aead);
3464                                 alg = &t_alg->algt.alg.aead.base;
3465                                 break;
3466
3467                         case CRYPTO_ALG_TYPE_AHASH:
3468                                 err = crypto_register_ahash(
3469                                                 &t_alg->algt.alg.hash);
3470                                 alg = &t_alg->algt.alg.hash.halg.base;
3471                                 break;
3472                         }
3473                         if (err) {
3474                                 dev_err(dev, "%s alg registration failed\n",
3475                                         alg->cra_driver_name);
3476                                 devm_kfree(dev, t_alg);
3477                         } else
3478                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3479                 }
3480         }
3481         if (!list_empty(&priv->alg_list))
3482                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3483                          (char *)of_get_property(np, "compatible", NULL));
3484
3485         return 0;
3486
3487 err_out:
3488         talitos_remove(ofdev);
3489
3490         return err;
3491 }
3492
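/* SEC1 and SEC2+ device tree compatibles, gated by the respective Kconfig options */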
3493 static const struct of_device_id talitos_match[] = {
3494 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3495         {
3496                 .compatible = "fsl,sec1.0",
3497         },
3498 #endif
3499 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3500         {
3501                 .compatible = "fsl,sec2.0",
3502         },
3503 #endif
3504         {},
3505 };
3506 MODULE_DEVICE_TABLE(of, talitos_match);
3507
3508 static struct platform_driver talitos_driver = {
3509         .driver = {
3510                 .name = "talitos",
3511                 .of_match_table = talitos_match,
3512         },
3513         .probe = talitos_probe,
3514         .remove = talitos_remove,
3515 };
3516
3517 module_platform_driver(talitos_driver);
3518
3519 MODULE_LICENSE("GPL");
3520 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3521 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");