1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
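/*
 * Note added for clarity (not in the original source): struct talitos_ptr
 * is the hardware pointer format used in the descriptors below.  SEC1
 * parts use only the 32-bit address and the len1 field; SEC2+ parts use
 * len together with eptr (the upper address bits for 36-bit addressing)
 * and j_extent (jump/extent flags such as DESC_PTR_LNKTBL_JUMP).
 */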
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            unsigned int len, bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (is_sec1) {
63                 ptr->len1 = cpu_to_be16(len);
64         } else {
65                 ptr->len = cpu_to_be16(len);
66                 ptr->eptr = upper_32_bits(dma_addr);
67         }
68 }
69
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71                              struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73         dst_ptr->ptr = src_ptr->ptr;
74         if (is_sec1) {
75                 dst_ptr->len1 = src_ptr->len1;
76         } else {
77                 dst_ptr->len = src_ptr->len;
78                 dst_ptr->eptr = src_ptr->eptr;
79         }
80 }
81
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83                                            bool is_sec1)
84 {
85         if (is_sec1)
86                 return be16_to_cpu(ptr->len1);
87         else
88                 return be16_to_cpu(ptr->len);
89 }
90
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92                                    bool is_sec1)
93 {
94         if (!is_sec1)
95                 ptr->j_extent = val;
96 }
97
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100         if (!is_sec1)
101                 ptr->j_extent |= val;
102 }
103
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void __map_single_talitos_ptr(struct device *dev,
108                                      struct talitos_ptr *ptr,
109                                      unsigned int len, void *data,
110                                      enum dma_data_direction dir,
111                                      unsigned long attrs)
112 {
113         dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114         struct talitos_private *priv = dev_get_drvdata(dev);
115         bool is_sec1 = has_ftr_sec1(priv);
116
117         to_talitos_ptr(ptr, dma_addr, len, is_sec1);
118 }
119
120 static void map_single_talitos_ptr(struct device *dev,
121                                    struct talitos_ptr *ptr,
122                                    unsigned int len, void *data,
123                                    enum dma_data_direction dir)
124 {
125         __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
126 }
127
128 static void map_single_talitos_ptr_nosync(struct device *dev,
129                                           struct talitos_ptr *ptr,
130                                           unsigned int len, void *data,
131                                           enum dma_data_direction dir)
132 {
133         __map_single_talitos_ptr(dev, ptr, len, data, dir,
134                                  DMA_ATTR_SKIP_CPU_SYNC);
135 }
136
137 /*
138  * unmap bus single (contiguous) h/w descriptor pointer
139  */
140 static void unmap_single_talitos_ptr(struct device *dev,
141                                      struct talitos_ptr *ptr,
142                                      enum dma_data_direction dir)
143 {
144         struct talitos_private *priv = dev_get_drvdata(dev);
145         bool is_sec1 = has_ftr_sec1(priv);
146
147         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
148                          from_talitos_ptr_len(ptr, is_sec1), dir);
149 }
150
151 static int reset_channel(struct device *dev, int ch)
152 {
153         struct talitos_private *priv = dev_get_drvdata(dev);
154         unsigned int timeout = TALITOS_TIMEOUT;
155         bool is_sec1 = has_ftr_sec1(priv);
156
157         if (is_sec1) {
158                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
159                           TALITOS1_CCCR_LO_RESET);
160
161                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
162                         TALITOS1_CCCR_LO_RESET) && --timeout)
163                         cpu_relax();
164         } else {
165                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
166                           TALITOS2_CCCR_RESET);
167
168                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
169                         TALITOS2_CCCR_RESET) && --timeout)
170                         cpu_relax();
171         }
172
173         if (timeout == 0) {
174                 dev_err(dev, "failed to reset channel %d\n", ch);
175                 return -EIO;
176         }
177
178         /* set 36-bit addressing, done writeback enable and done IRQ enable */
179         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
180                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
181         /* enable chaining descriptors */
182         if (is_sec1)
183                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
184                           TALITOS_CCCR_LO_NE);
185
186         /* and ICCR writeback, if available */
187         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189                           TALITOS_CCCR_LO_IWSE);
190
191         return 0;
192 }
193
194 static int reset_device(struct device *dev)
195 {
196         struct talitos_private *priv = dev_get_drvdata(dev);
197         unsigned int timeout = TALITOS_TIMEOUT;
198         bool is_sec1 = has_ftr_sec1(priv);
199         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
200
201         setbits32(priv->reg + TALITOS_MCR, mcr);
202
203         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
204                && --timeout)
205                 cpu_relax();
206
207         if (priv->irq[1]) {
208                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209                 setbits32(priv->reg + TALITOS_MCR, mcr);
210         }
211
212         if (timeout == 0) {
213                 dev_err(dev, "failed to reset device\n");
214                 return -EIO;
215         }
216
217         return 0;
218 }
219
220 /*
221  * Reset and initialize the device
222  */
223 static int init_device(struct device *dev)
224 {
225         struct talitos_private *priv = dev_get_drvdata(dev);
226         int ch, err;
227         bool is_sec1 = has_ftr_sec1(priv);
228
229         /*
230          * Master reset.
231          * Per the errata documentation, certain SEC interrupts are not
232          * fully cleared by writing the MCR:SWR bit, so the bit is set
233          * twice to completely reset the device.
234          */
235         err = reset_device(dev);
236         if (err)
237                 return err;
238
239         err = reset_device(dev);
240         if (err)
241                 return err;
242
243         /* reset channels */
244         for (ch = 0; ch < priv->num_channels; ch++) {
245                 err = reset_channel(dev, ch);
246                 if (err)
247                         return err;
248         }
249
250         /* enable channel done and error interrupts */
251         if (is_sec1) {
252                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
254                 /* disable key parity check in DEU (test vectors may have erroneous parity) */
255                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
256         } else {
257                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
259         }
260
261         /* disable integrity check error interrupts (use writeback instead) */
262         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264                           TALITOS_MDEUICR_LO_ICE);
265
266         return 0;
267 }
268
269 /**
270  * talitos_submit - submits a descriptor to the device for processing
271  * @dev:        the SEC device to be used
272  * @ch:         the SEC device channel to be used
273  * @desc:       the descriptor to be processed by the device
274  * @callback:   whom to call when processing is complete
275  * @context:    a handle for use by caller (optional)
276  *
277  * desc must contain valid dma-mapped (bus physical) address pointers.
278  * callback must check err and feedback in descriptor header
279  * for device processing status.
280  */
281 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282                    void (*callback)(struct device *dev,
283                                     struct talitos_desc *desc,
284                                     void *context, int error),
285                    void *context)
286 {
287         struct talitos_private *priv = dev_get_drvdata(dev);
288         struct talitos_request *request;
289         unsigned long flags;
290         int head;
291         bool is_sec1 = has_ftr_sec1(priv);
292
293         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
294
295         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296                 /* h/w fifo is full */
297                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
298                 return -EAGAIN;
299         }
300
301         head = priv->chan[ch].head;
302         request = &priv->chan[ch].fifo[head];
303
304         /* map descriptor and save caller data */
305         if (is_sec1) {
306                 desc->hdr1 = desc->hdr;
307                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
308                                                    TALITOS_DESC_SIZE,
309                                                    DMA_BIDIRECTIONAL);
310         } else {
311                 request->dma_desc = dma_map_single(dev, desc,
312                                                    TALITOS_DESC_SIZE,
313                                                    DMA_BIDIRECTIONAL);
314         }
315         request->callback = callback;
316         request->context = context;
317
318         /* increment fifo head */
319         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
320
321         smp_wmb();
322         request->desc = desc;
323
324         /* GO! */
325         wmb();
326         out_be32(priv->chan[ch].reg + TALITOS_FF,
327                  upper_32_bits(request->dma_desc));
328         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329                  lower_32_bits(request->dma_desc));
330
331         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
332
333         return -EINPROGRESS;
334 }
335 EXPORT_SYMBOL(talitos_submit);
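/*
 * Illustrative sketch, added for clarity and not part of the original
 * driver: a request path typically submits a dma-mapped descriptor and
 * treats -EINPROGRESS as success, with the outcome reported through the
 * completion callback (ipsec_esp() below follows this pattern):
 *
 *	err = talitos_submit(dev, ctx->ch, &edesc->desc, my_done_cb, req);
 *	if (err != -EINPROGRESS) {
 *		my_cleanup(edesc);	-EAGAIN means the channel fifo is
 *					full; unmap, free and retry later
 *	}
 *
 * my_done_cb() and my_cleanup() are hypothetical names; real callbacks in
 * this file include ipsec_esp_encrypt_done().
 */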
336
337 /*
338  * process what was done, notify callback of error if not
339  */
340 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
341 {
342         struct talitos_private *priv = dev_get_drvdata(dev);
343         struct talitos_request *request, saved_req;
344         unsigned long flags;
345         int tail, status;
346         bool is_sec1 = has_ftr_sec1(priv);
347
348         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
349
350         tail = priv->chan[ch].tail;
351         while (priv->chan[ch].fifo[tail].desc) {
352                 __be32 hdr;
353
354                 request = &priv->chan[ch].fifo[tail];
355
356                 /* descriptors with their done bits set don't get the error */
357                 rmb();
358                 if (!is_sec1)
359                         hdr = request->desc->hdr;
360                 else if (request->desc->next_desc)
361                         hdr = (request->desc + 1)->hdr1;
362                 else
363                         hdr = request->desc->hdr1;
364
365                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
366                         status = 0;
367                 else
368                         if (!error)
369                                 break;
370                         else
371                                 status = error;
372
373                 dma_unmap_single(dev, request->dma_desc,
374                                  TALITOS_DESC_SIZE,
375                                  DMA_BIDIRECTIONAL);
376
377                 /* copy entries so we can call callback outside lock */
378                 saved_req.desc = request->desc;
379                 saved_req.callback = request->callback;
380                 saved_req.context = request->context;
381
382                 /* release request entry in fifo */
383                 smp_wmb();
384                 request->desc = NULL;
385
386                 /* increment fifo tail */
387                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
388
389                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
390
391                 atomic_dec(&priv->chan[ch].submit_count);
392
393                 saved_req.callback(dev, saved_req.desc, saved_req.context,
394                                    status);
395                 /* channel may resume processing in single desc error case */
396                 if (error && !reset_ch && status == error)
397                         return;
398                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
399                 tail = priv->chan[ch].tail;
400         }
401
402         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
403 }
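/*
 * Note added for clarity: the per-channel request fifo used above is a
 * power-of-two ring.  talitos_submit() is the producer (guarded by
 * head_lock), flush_channel() the consumer (guarded by tail_lock), and
 * submit_count bounds the number of outstanding descriptors so the
 * hardware fetch fifo cannot be overrun.
 */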
404
405 /*
406  * process completed requests for channels that have done status
407  */
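/*
 * Note added for clarity: in the SEC1 ISR the per-channel "done" flags
 * live at bits 28/30/16/18, which is what the 0x10000000, 0x40000000,
 * 0x00010000 and 0x00040000 tests below check; SEC2+ reports channel N
 * done on bit 2*N, as tested in the DEF_TALITOS2_DONE variant.
 */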
408 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
409 static void talitos1_done_##name(unsigned long data)                    \
410 {                                                                       \
411         struct device *dev = (struct device *)data;                     \
412         struct talitos_private *priv = dev_get_drvdata(dev);            \
413         unsigned long flags;                                            \
414                                                                         \
415         if (ch_done_mask & 0x10000000)                                  \
416                 flush_channel(dev, 0, 0, 0);                    \
417         if (ch_done_mask & 0x40000000)                                  \
418                 flush_channel(dev, 1, 0, 0);                    \
419         if (ch_done_mask & 0x00010000)                                  \
420                 flush_channel(dev, 2, 0, 0);                    \
421         if (ch_done_mask & 0x00040000)                                  \
422                 flush_channel(dev, 3, 0, 0);                    \
423                                                                         \
424         /* At this point, all completed channels have been processed */ \
425         /* Unmask done interrupts for channels completed later on. */   \
426         spin_lock_irqsave(&priv->reg_lock, flags);                      \
427         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
428         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
429         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
430 }
431
432 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
433 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
434
435 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
436 static void talitos2_done_##name(unsigned long data)                    \
437 {                                                                       \
438         struct device *dev = (struct device *)data;                     \
439         struct talitos_private *priv = dev_get_drvdata(dev);            \
440         unsigned long flags;                                            \
441                                                                         \
442         if (ch_done_mask & 1)                                           \
443                 flush_channel(dev, 0, 0, 0);                            \
444         if (ch_done_mask & (1 << 2))                                    \
445                 flush_channel(dev, 1, 0, 0);                            \
446         if (ch_done_mask & (1 << 4))                                    \
447                 flush_channel(dev, 2, 0, 0);                            \
448         if (ch_done_mask & (1 << 6))                                    \
449                 flush_channel(dev, 3, 0, 0);                            \
450                                                                         \
451         /* At this point, all completed channels have been processed */ \
452         /* Unmask done interrupts for channels completed later on. */   \
453         spin_lock_irqsave(&priv->reg_lock, flags);                      \
454         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
455         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
456         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
457 }
458
459 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
460 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
461 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
462 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
463
464 /*
465  * locate current (offending) descriptor
466  */
467 static u32 current_desc_hdr(struct device *dev, int ch)
468 {
469         struct talitos_private *priv = dev_get_drvdata(dev);
470         int tail, iter;
471         dma_addr_t cur_desc;
472
473         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
474         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
475
476         if (!cur_desc) {
477                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
478                 return 0;
479         }
480
481         tail = priv->chan[ch].tail;
482
483         iter = tail;
484         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
485                priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
486                 iter = (iter + 1) & (priv->fifo_len - 1);
487                 if (iter == tail) {
488                         dev_err(dev, "couldn't locate current descriptor\n");
489                         return 0;
490                 }
491         }
492
493         if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
494                 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
495
496         return priv->chan[ch].fifo[iter].desc->hdr;
497 }
498
499 /*
500  * user diagnostics; report root cause of error based on execution unit status
501  */
502 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
503 {
504         struct talitos_private *priv = dev_get_drvdata(dev);
505         int i;
506
507         if (!desc_hdr)
508                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
509
510         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
511         case DESC_HDR_SEL0_AFEU:
512                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
513                         in_be32(priv->reg_afeu + TALITOS_EUISR),
514                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
515                 break;
516         case DESC_HDR_SEL0_DEU:
517                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
518                         in_be32(priv->reg_deu + TALITOS_EUISR),
519                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
520                 break;
521         case DESC_HDR_SEL0_MDEUA:
522         case DESC_HDR_SEL0_MDEUB:
523                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
524                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
525                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
526                 break;
527         case DESC_HDR_SEL0_RNG:
528                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
529                         in_be32(priv->reg_rngu + TALITOS_ISR),
530                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
531                 break;
532         case DESC_HDR_SEL0_PKEU:
533                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
534                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
535                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
536                 break;
537         case DESC_HDR_SEL0_AESU:
538                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
539                         in_be32(priv->reg_aesu + TALITOS_EUISR),
540                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
541                 break;
542         case DESC_HDR_SEL0_CRCU:
543                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
544                         in_be32(priv->reg_crcu + TALITOS_EUISR),
545                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
546                 break;
547         case DESC_HDR_SEL0_KEU:
548                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
549                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
550                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
551                 break;
552         }
553
554         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
555         case DESC_HDR_SEL1_MDEUA:
556         case DESC_HDR_SEL1_MDEUB:
557                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
558                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
559                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
560                 break;
561         case DESC_HDR_SEL1_CRCU:
562                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
563                         in_be32(priv->reg_crcu + TALITOS_EUISR),
564                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
565                 break;
566         }
567
568         for (i = 0; i < 8; i++)
569                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
570                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
571                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
572 }
573
574 /*
575  * recover from error interrupts
576  */
577 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
578 {
579         struct talitos_private *priv = dev_get_drvdata(dev);
580         unsigned int timeout = TALITOS_TIMEOUT;
581         int ch, error, reset_dev = 0;
582         u32 v_lo;
583         bool is_sec1 = has_ftr_sec1(priv);
584         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
585
586         for (ch = 0; ch < priv->num_channels; ch++) {
587                 /* skip channels without errors */
588                 if (is_sec1) {
589                         /* bits 29, 31, 17, 19 */
590                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
591                                 continue;
592                 } else {
593                         if (!(isr & (1 << (ch * 2 + 1))))
594                                 continue;
595                 }
596
597                 error = -EINVAL;
598
599                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
600
601                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
602                         dev_err(dev, "double fetch fifo overflow error\n");
603                         error = -EAGAIN;
604                         reset_ch = 1;
605                 }
606                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
607                         /* h/w dropped descriptor */
608                         dev_err(dev, "single fetch fifo overflow error\n");
609                         error = -EAGAIN;
610                 }
611                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
612                         dev_err(dev, "master data transfer error\n");
613                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
614                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
615                                              : "s/g data length zero error\n");
616                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
617                         dev_err(dev, is_sec1 ? "parity error\n"
618                                              : "fetch pointer zero error\n");
619                 if (v_lo & TALITOS_CCPSR_LO_IDH)
620                         dev_err(dev, "illegal descriptor header error\n");
621                 if (v_lo & TALITOS_CCPSR_LO_IEU)
622                         dev_err(dev, is_sec1 ? "static assignment error\n"
623                                              : "invalid exec unit error\n");
624                 if (v_lo & TALITOS_CCPSR_LO_EU)
625                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
626                 if (!is_sec1) {
627                         if (v_lo & TALITOS_CCPSR_LO_GB)
628                                 dev_err(dev, "gather boundary error\n");
629                         if (v_lo & TALITOS_CCPSR_LO_GRL)
630                                 dev_err(dev, "gather return/length error\n");
631                         if (v_lo & TALITOS_CCPSR_LO_SB)
632                                 dev_err(dev, "scatter boundary error\n");
633                         if (v_lo & TALITOS_CCPSR_LO_SRL)
634                                 dev_err(dev, "scatter return/length error\n");
635                 }
636
637                 flush_channel(dev, ch, error, reset_ch);
638
639                 if (reset_ch) {
640                         reset_channel(dev, ch);
641                 } else {
642                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
643                                   TALITOS2_CCCR_CONT);
644                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
645                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
646                                TALITOS2_CCCR_CONT) && --timeout)
647                                 cpu_relax();
648                         if (timeout == 0) {
649                                 dev_err(dev, "failed to restart channel %d\n",
650                                         ch);
651                                 reset_dev = 1;
652                         }
653                 }
654         }
655         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
656             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
657                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
658                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
659                                 isr, isr_lo);
660                 else
661                         dev_err(dev, "done overflow, internal time out, or "
662                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
663
664                 /* purge request queues */
665                 for (ch = 0; ch < priv->num_channels; ch++)
666                         flush_channel(dev, ch, -EIO, 1);
667
668                 /* reset and reinitialize the device */
669                 init_device(dev);
670         }
671 }
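/*
 * Note added for clarity: recovery above is layered.  Per-channel errors
 * flush that channel's queue with the error code and then either reset
 * the channel (SEC1, or after a double fetch fifo overflow) or ask SEC2+
 * to continue via CCCR_CONT; unexpected global error bits, or a channel
 * that fails to restart, trigger a full purge of every channel followed
 * by init_device().
 */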
672
673 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
674 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
675 {                                                                              \
676         struct device *dev = data;                                             \
677         struct talitos_private *priv = dev_get_drvdata(dev);                   \
678         u32 isr, isr_lo;                                                       \
679         unsigned long flags;                                                   \
680                                                                                \
681         spin_lock_irqsave(&priv->reg_lock, flags);                             \
682         isr = in_be32(priv->reg + TALITOS_ISR);                                \
683         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
684         /* Acknowledge interrupt */                                            \
685         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
686         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
687                                                                                \
688         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
689                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
690                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
691         }                                                                      \
692         else {                                                                 \
693                 if (likely(isr & ch_done_mask)) {                              \
694                         /* mask further done interrupts. */                    \
695                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
696                         /* done_task will unmask done interrupts at exit */    \
697                         tasklet_schedule(&priv->done_task[tlet]);              \
698                 }                                                              \
699                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
700         }                                                                      \
701                                                                                \
702         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
703                                                                 IRQ_NONE;      \
704 }
705
706 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
707
708 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
709 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
710 {                                                                              \
711         struct device *dev = data;                                             \
712         struct talitos_private *priv = dev_get_drvdata(dev);                   \
713         u32 isr, isr_lo;                                                       \
714         unsigned long flags;                                                   \
715                                                                                \
716         spin_lock_irqsave(&priv->reg_lock, flags);                             \
717         isr = in_be32(priv->reg + TALITOS_ISR);                                \
718         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
719         /* Acknowledge interrupt */                                            \
720         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
721         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
722                                                                                \
723         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
724                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
725                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
726         }                                                                      \
727         else {                                                                 \
728                 if (likely(isr & ch_done_mask)) {                              \
729                         /* mask further done interrupts. */                    \
730                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
731                         /* done_task will unmask done interrupts at exit */    \
732                         tasklet_schedule(&priv->done_task[tlet]);              \
733                 }                                                              \
734                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
735         }                                                                      \
736                                                                                \
737         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
738                                                                 IRQ_NONE;      \
739 }
740
741 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
742 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
743                        0)
744 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
745                        1)
746
747 /*
748  * hwrng
749  */
750 static int talitos_rng_data_present(struct hwrng *rng, int wait)
751 {
752         struct device *dev = (struct device *)rng->priv;
753         struct talitos_private *priv = dev_get_drvdata(dev);
754         u32 ofl;
755         int i;
756
757         for (i = 0; i < 20; i++) {
758                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
759                       TALITOS_RNGUSR_LO_OFL;
760                 if (ofl || !wait)
761                         break;
762                 udelay(10);
763         }
764
765         return !!ofl;
766 }
767
768 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
769 {
770         struct device *dev = (struct device *)rng->priv;
771         struct talitos_private *priv = dev_get_drvdata(dev);
772
773         /* rng fifo requires 64-bit accesses */
774         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
775         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
776
777         return sizeof(u32);
778 }
779
780 static int talitos_rng_init(struct hwrng *rng)
781 {
782         struct device *dev = (struct device *)rng->priv;
783         struct talitos_private *priv = dev_get_drvdata(dev);
784         unsigned int timeout = TALITOS_TIMEOUT;
785
786         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
787         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
788                  & TALITOS_RNGUSR_LO_RD)
789                && --timeout)
790                 cpu_relax();
791         if (timeout == 0) {
792                 dev_err(dev, "failed to reset rng hw\n");
793                 return -ENODEV;
794         }
795
796         /* start generating */
797         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
798
799         return 0;
800 }
801
802 static int talitos_register_rng(struct device *dev)
803 {
804         struct talitos_private *priv = dev_get_drvdata(dev);
805         int err;
806
807         priv->rng.name          = dev_driver_string(dev);
808         priv->rng.init          = talitos_rng_init;
809         priv->rng.data_present  = talitos_rng_data_present;
810         priv->rng.data_read     = talitos_rng_data_read;
811         priv->rng.priv          = (unsigned long)dev;
812
813         err = hwrng_register(&priv->rng);
814         if (!err)
815                 priv->rng_registered = true;
816
817         return err;
818 }
819
820 static void talitos_unregister_rng(struct device *dev)
821 {
822         struct talitos_private *priv = dev_get_drvdata(dev);
823
824         if (!priv->rng_registered)
825                 return;
826
827         hwrng_unregister(&priv->rng);
828         priv->rng_registered = false;
829 }
830
831 /*
832  * crypto alg
833  */
834 #define TALITOS_CRA_PRIORITY            3000
835 /*
836  * Defines a priority for doing AEAD with descriptors type
837  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
838  */
839 #define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
840 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
841 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
842
843 struct talitos_ctx {
844         struct device *dev;
845         int ch;
846         __be32 desc_hdr_template;
847         u8 key[TALITOS_MAX_KEY_SIZE];
848         u8 iv[TALITOS_MAX_IV_LENGTH];
849         dma_addr_t dma_key;
850         unsigned int keylen;
851         unsigned int enckeylen;
852         unsigned int authkeylen;
853 };
854
855 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
856 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
857
858 struct talitos_ahash_req_ctx {
859         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
860         unsigned int hw_context_size;
861         u8 buf[2][HASH_MAX_BLOCK_SIZE];
862         int buf_idx;
863         unsigned int swinit;
864         unsigned int first;
865         unsigned int last;
866         unsigned int to_hash_later;
867         unsigned int nbuf;
868         struct scatterlist bufsl[2];
869         struct scatterlist *psrc;
870 };
871
872 struct talitos_export_state {
873         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
874         u8 buf[HASH_MAX_BLOCK_SIZE];
875         unsigned int swinit;
876         unsigned int first;
877         unsigned int last;
878         unsigned int to_hash_later;
879         unsigned int nbuf;
880 };
881
882 static int aead_setkey(struct crypto_aead *authenc,
883                        const u8 *key, unsigned int keylen)
884 {
885         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
886         struct device *dev = ctx->dev;
887         struct crypto_authenc_keys keys;
888
889         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
890                 goto badkey;
891
892         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
893                 goto badkey;
894
895         if (ctx->keylen)
896                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
897
898         memcpy(ctx->key, keys.authkey, keys.authkeylen);
899         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
900
901         ctx->keylen = keys.authkeylen + keys.enckeylen;
902         ctx->enckeylen = keys.enckeylen;
903         ctx->authkeylen = keys.authkeylen;
904         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
905                                       DMA_TO_DEVICE);
906
907         memzero_explicit(&keys, sizeof(keys));
908         return 0;
909
910 badkey:
911         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
912         memzero_explicit(&keys, sizeof(keys));
913         return -EINVAL;
914 }
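/*
 * Note added for clarity: aead_setkey() stores the authenc key material
 * as one contiguous blob, authentication key first and cipher key right
 * after it, and maps the blob once as ctx->dma_key.  Descriptors then
 * reference either half: the HMAC key pointer uses dma_key/authkeylen and
 * the cipher key pointer uses dma_key + authkeylen with enckeylen (see
 * ipsec_esp() below).
 */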
915
916 /*
917  * talitos_edesc - s/w-extended descriptor
918  * @src_nents: number of segments in input scatterlist
919  * @dst_nents: number of segments in output scatterlist
920  * @icv_ool: whether ICV is out-of-line
921  * @iv_dma: dma address of iv for checking continuity and link table
922  * @dma_len: length of dma mapped link_tbl space
923  * @dma_link_tbl: bus physical address of link_tbl/buf
924  * @desc: h/w descriptor
925  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
926  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
927  *
928  * if decrypting (with authcheck), or either one of src_nents or dst_nents
929  * is greater than 1, an integrity check value is concatenated to the end
930  * of link_tbl data
931  */
932 struct talitos_edesc {
933         int src_nents;
934         int dst_nents;
935         bool icv_ool;
936         dma_addr_t iv_dma;
937         int dma_len;
938         dma_addr_t dma_link_tbl;
939         struct talitos_desc desc;
940         union {
941                 struct talitos_ptr link_tbl[0];
942                 u8 buf[0];
943         };
944 };
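/*
 * Note added for clarity: a talitos_edesc is one allocation whose trailing
 * union acts as a flexible array holding either the SEC2+ link tables or,
 * on SEC1, a contiguous bounce buffer for multi-segment scatterlists;
 * dma_link_tbl maps that trailing dma_len area so descriptor pointers can
 * reference it directly.
 */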
945
946 static void talitos_sg_unmap(struct device *dev,
947                              struct talitos_edesc *edesc,
948                              struct scatterlist *src,
949                              struct scatterlist *dst,
950                              unsigned int len, unsigned int offset)
951 {
952         struct talitos_private *priv = dev_get_drvdata(dev);
953         bool is_sec1 = has_ftr_sec1(priv);
954         unsigned int src_nents = edesc->src_nents ? : 1;
955         unsigned int dst_nents = edesc->dst_nents ? : 1;
956
957         if (is_sec1 && dst && dst_nents > 1) {
958                 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
959                                            len, DMA_FROM_DEVICE);
960                 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
961                                      offset);
962         }
963         if (src != dst) {
964                 if (src_nents == 1 || !is_sec1)
965                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
966
967                 if (dst && (dst_nents == 1 || !is_sec1))
968                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
969         } else if (src_nents == 1 || !is_sec1) {
970                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
971         }
972 }
973
974 static void ipsec_esp_unmap(struct device *dev,
975                             struct talitos_edesc *edesc,
976                             struct aead_request *areq)
977 {
978         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
979         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
980         unsigned int ivsize = crypto_aead_ivsize(aead);
981         bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
982         struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
983
984         if (is_ipsec_esp)
985                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
986                                          DMA_FROM_DEVICE);
987         unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
988
989         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
990                          areq->assoclen);
991
992         if (edesc->dma_len)
993                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
994                                  DMA_BIDIRECTIONAL);
995
996         if (!is_ipsec_esp) {
997                 unsigned int dst_nents = edesc->dst_nents ? : 1;
998
999                 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1000                                    areq->assoclen + areq->cryptlen - ivsize);
1001         }
1002 }
1003
1004 /*
1005  * ipsec_esp descriptor callbacks
1006  */
1007 static void ipsec_esp_encrypt_done(struct device *dev,
1008                                    struct talitos_desc *desc, void *context,
1009                                    int err)
1010 {
1011         struct talitos_private *priv = dev_get_drvdata(dev);
1012         bool is_sec1 = has_ftr_sec1(priv);
1013         struct aead_request *areq = context;
1014         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1015         unsigned int authsize = crypto_aead_authsize(authenc);
1016         unsigned int ivsize = crypto_aead_ivsize(authenc);
1017         struct talitos_edesc *edesc;
1018         struct scatterlist *sg;
1019         void *icvdata;
1020
1021         edesc = container_of(desc, struct talitos_edesc, desc);
1022
1023         ipsec_esp_unmap(dev, edesc, areq);
1024
1025         /* copy the generated ICV to dst */
1026         if (edesc->icv_ool) {
1027                 if (is_sec1)
1028                         icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1029                 else
1030                         icvdata = &edesc->link_tbl[edesc->src_nents +
1031                                                    edesc->dst_nents + 2];
1032                 sg = sg_last(areq->dst, edesc->dst_nents);
1033                 memcpy((char *)sg_virt(sg) + sg->length - authsize,
1034                        icvdata, authsize);
1035         }
1036
1037         dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1038
1039         kfree(edesc);
1040
1041         aead_request_complete(areq, err);
1042 }
1043
1044 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1045                                           struct talitos_desc *desc,
1046                                           void *context, int err)
1047 {
1048         struct aead_request *req = context;
1049         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1050         unsigned int authsize = crypto_aead_authsize(authenc);
1051         struct talitos_edesc *edesc;
1052         struct scatterlist *sg;
1053         char *oicv, *icv;
1054         struct talitos_private *priv = dev_get_drvdata(dev);
1055         bool is_sec1 = has_ftr_sec1(priv);
1056
1057         edesc = container_of(desc, struct talitos_edesc, desc);
1058
1059         ipsec_esp_unmap(dev, edesc, req);
1060
1061         if (!err) {
1062                 /* auth check */
1063                 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1064                 icv = (char *)sg_virt(sg) + sg->length - authsize;
1065
1066                 if (edesc->dma_len) {
1067                         if (is_sec1)
1068                                 oicv = (char *)&edesc->dma_link_tbl +
1069                                                req->assoclen + req->cryptlen;
1070                         else
1071                                 oicv = (char *)
1072                                        &edesc->link_tbl[edesc->src_nents +
1073                                                         edesc->dst_nents + 2];
1074                         if (edesc->icv_ool)
1075                                 icv = oicv + authsize;
1076                 } else
1077                         oicv = (char *)&edesc->link_tbl[0];
1078
1079                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1080         }
1081
1082         kfree(edesc);
1083
1084         aead_request_complete(req, err);
1085 }
1086
1087 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1088                                           struct talitos_desc *desc,
1089                                           void *context, int err)
1090 {
1091         struct aead_request *req = context;
1092         struct talitos_edesc *edesc;
1093
1094         edesc = container_of(desc, struct talitos_edesc, desc);
1095
1096         ipsec_esp_unmap(dev, edesc, req);
1097
1098         /* check ICV auth status */
1099         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1100                      DESC_HDR_LO_ICCR1_PASS))
1101                 err = -EBADMSG;
1102
1103         kfree(edesc);
1104
1105         aead_request_complete(req, err);
1106 }
1107
1108 /*
1109  * convert scatterlist to SEC h/w link table format
1110  * stop at cryptlen bytes
1111  */
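/*
 * Worked example, added for clarity: with three DMA segments of 100, 200
 * and 300 bytes, offset 150 and cryptlen 400, the first segment is skipped
 * entirely, the second contributes its last 150 bytes and the third the
 * remaining 250 bytes; two link table entries are written and the second
 * one is tagged DESC_PTR_LNKTBL_RETURN.
 */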
1112 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1113                                  unsigned int offset, int cryptlen,
1114                                  struct talitos_ptr *link_tbl_ptr)
1115 {
1116         int n_sg = sg_count;
1117         int count = 0;
1118
1119         while (cryptlen && sg && n_sg--) {
1120                 unsigned int len = sg_dma_len(sg);
1121
1122                 if (offset >= len) {
1123                         offset -= len;
1124                         goto next;
1125                 }
1126
1127                 len -= offset;
1128
1129                 if (len > cryptlen)
1130                         len = cryptlen;
1131
1132                 to_talitos_ptr(link_tbl_ptr + count,
1133                                sg_dma_address(sg) + offset, len, 0);
1134                 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1135                 count++;
1136                 cryptlen -= len;
1137                 offset = 0;
1138
1139 next:
1140                 sg = sg_next(sg);
1141         }
1142
1143         /* tag end of link table */
1144         if (count > 0)
1145                 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1146                                        DESC_PTR_LNKTBL_RETURN, 0);
1147
1148         return count;
1149 }
1150
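/*
 * Note added for clarity: talitos_sg_map_ext() picks the simplest
 * representation for a scatterlist: a null pointer when there is no data,
 * a direct pointer when the data is a single DMA segment (or already
 * linearised into the SEC1 bounce buffer), and otherwise a link table
 * built at link_tbl[tbl_off] with DESC_PTR_LNKTBL_JUMP set in the
 * descriptor pointer's extent.  elen extends the mapped length, e.g. to
 * cover an ICV that follows the ciphertext.
 */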
1151 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1152                               unsigned int len, struct talitos_edesc *edesc,
1153                               struct talitos_ptr *ptr, int sg_count,
1154                               unsigned int offset, int tbl_off, int elen)
1155 {
1156         struct talitos_private *priv = dev_get_drvdata(dev);
1157         bool is_sec1 = has_ftr_sec1(priv);
1158
1159         if (!src) {
1160                 to_talitos_ptr(ptr, 0, 0, is_sec1);
1161                 return 1;
1162         }
1163         to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1164         if (sg_count == 1) {
1165                 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1166                 return sg_count;
1167         }
1168         if (is_sec1) {
1169                 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1170                 return sg_count;
1171         }
1172         sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1173                                          &edesc->link_tbl[tbl_off]);
1174         if (sg_count == 1) {
1175                 /* Only one segment now, so no link tbl needed */
1176                 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1177                 return sg_count;
1178         }
1179         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1180                             tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1181         to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1182
1183         return sg_count;
1184 }
1185
1186 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1187                           unsigned int len, struct talitos_edesc *edesc,
1188                           struct talitos_ptr *ptr, int sg_count,
1189                           unsigned int offset, int tbl_off)
1190 {
1191         return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1192                                   tbl_off, 0);
1193 }
1194
1195 /*
1196  * fill in and submit ipsec_esp descriptor
1197  */
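/*
 * Note added for clarity: ipsec_esp() fills the seven descriptor pointers
 * as follows.  ptr[0]: HMAC key, ptr[1]: associated data, ptr[4]: cipher
 * input, ptr[5]: cipher output.  For true IPSEC_ESP descriptors ptr[2] is
 * the cipher IV, ptr[3] the cipher key and ptr[6] the IV out; for the
 * HMAC_SNOOP_NO_AFEA (HSNA) type the IV and key pointers are swapped and
 * ptr[6] receives the ICV instead.
 */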
1198 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1199                      void (*callback)(struct device *dev,
1200                                       struct talitos_desc *desc,
1201                                       void *context, int error))
1202 {
1203         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1204         unsigned int authsize = crypto_aead_authsize(aead);
1205         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1206         struct device *dev = ctx->dev;
1207         struct talitos_desc *desc = &edesc->desc;
1208         unsigned int cryptlen = areq->cryptlen;
1209         unsigned int ivsize = crypto_aead_ivsize(aead);
1210         int tbl_off = 0;
1211         int sg_count, ret;
1212         int elen = 0;
1213         bool sync_needed = false;
1214         struct talitos_private *priv = dev_get_drvdata(dev);
1215         bool is_sec1 = has_ftr_sec1(priv);
1216         bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1217         struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1218         struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1219
1220         /* hmac key */
1221         to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1222
1223         sg_count = edesc->src_nents ?: 1;
1224         if (is_sec1 && sg_count > 1)
1225                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1226                                   areq->assoclen + cryptlen);
1227         else
1228                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1229                                       (areq->src == areq->dst) ?
1230                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1231
1232         /* hmac data */
1233         ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1234                              &desc->ptr[1], sg_count, 0, tbl_off);
1235
1236         if (ret > 1) {
1237                 tbl_off += ret;
1238                 sync_needed = true;
1239         }
1240
1241         /* cipher iv */
1242         to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1243
1244         /* cipher key */
1245         to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1246                        ctx->enckeylen, is_sec1);
1247
1248         /*
1249          * cipher in
1250          * map and adjust cipher len to aead request cryptlen.
1251          * extent is bytes of HMAC appended to the ciphertext,
1252          * typically 12 for ipsec
1253          */
1254         if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1255                 elen = authsize;
1256
1257         ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1258                                  sg_count, areq->assoclen, tbl_off, elen);
1259
1260         if (ret > 1) {
1261                 tbl_off += ret;
1262                 sync_needed = true;
1263         }
1264
1265         /* cipher out */
1266         if (areq->src != areq->dst) {
1267                 sg_count = edesc->dst_nents ? : 1;
1268                 if (!is_sec1 || sg_count == 1)
1269                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1270         }
1271
1272         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1273                              sg_count, areq->assoclen, tbl_off);
1274
1275         if (is_ipsec_esp)
1276                 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1277
1278         /* ICV data */
1279         if (ret > 1) {
1280                 tbl_off += ret;
1281                 edesc->icv_ool = true;
1282                 sync_needed = true;
1283
1284                 if (is_ipsec_esp) {
1285                         struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1286                         int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1287                                      sizeof(struct talitos_ptr) + authsize;
1288
1289                         /* Add an entry to the link table for ICV data */
1290                         to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1291                         to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1292                                                is_sec1);
1293
1294                         /* icv data follows link tables */
1295                         to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1296                                        authsize, is_sec1);
1297                 } else {
1298                         dma_addr_t addr = edesc->dma_link_tbl;
1299
1300                         if (is_sec1)
1301                                 addr += areq->assoclen + cryptlen;
1302                         else
1303                                 addr += sizeof(struct talitos_ptr) * tbl_off;
1304
1305                         to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1306                 }
1307         } else if (!is_ipsec_esp) {
1308                 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1309                                      &desc->ptr[6], sg_count, areq->assoclen +
1310                                                               cryptlen,
1311                                      tbl_off);
1312                 if (ret > 1) {
1313                         tbl_off += ret;
1314                         edesc->icv_ool = true;
1315                         sync_needed = true;
1316                 } else {
1317                         edesc->icv_ool = false;
1318                 }
1319         } else {
1320                 edesc->icv_ool = false;
1321         }
1322
1323         /* iv out */
1324         if (is_ipsec_esp)
1325                 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1326                                        DMA_FROM_DEVICE);
1327
1328         if (sync_needed)
1329                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1330                                            edesc->dma_len,
1331                                            DMA_BIDIRECTIONAL);
1332
1333         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1334         if (ret != -EINPROGRESS) {
1335                 ipsec_esp_unmap(dev, edesc, areq);
1336                 kfree(edesc);
1337         }
1338         return ret;
1339 }
1340
1341 /*
1342  * allocate and map the extended descriptor
1343  */
1344 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1345                                                  struct scatterlist *src,
1346                                                  struct scatterlist *dst,
1347                                                  u8 *iv,
1348                                                  unsigned int assoclen,
1349                                                  unsigned int cryptlen,
1350                                                  unsigned int authsize,
1351                                                  unsigned int ivsize,
1352                                                  int icv_stashing,
1353                                                  u32 cryptoflags,
1354                                                  bool encrypt)
1355 {
1356         struct talitos_edesc *edesc;
1357         int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1358         dma_addr_t iv_dma = 0;
1359         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1360                       GFP_ATOMIC;
1361         struct talitos_private *priv = dev_get_drvdata(dev);
1362         bool is_sec1 = has_ftr_sec1(priv);
1363         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364
1365         if (cryptlen + authsize > max_len) {
1366                 dev_err(dev, "length exceeds h/w max limit\n");
1367                 return ERR_PTR(-EINVAL);
1368         }
1369
1370         if (!dst || dst == src) {
1371                 src_len = assoclen + cryptlen + authsize;
1372                 src_nents = sg_nents_for_len(src, src_len);
1373                 if (src_nents < 0) {
1374                         dev_err(dev, "Invalid number of src SG.\n");
1375                         return ERR_PTR(-EINVAL);
1376                 }
1377                 src_nents = (src_nents == 1) ? 0 : src_nents;
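                     /*
                      * nents == 0 denotes a single, contiguous segment:
                      * no link table entry is needed for it
                      */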
1378                 dst_nents = dst ? src_nents : 0;
1379                 dst_len = 0;
1380         } else { /* dst && dst != src */
1381                 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1382                 src_nents = sg_nents_for_len(src, src_len);
1383                 if (src_nents < 0) {
1384                         dev_err(dev, "Invalid number of src SG.\n");
1385                         return ERR_PTR(-EINVAL);
1386                 }
1387                 src_nents = (src_nents == 1) ? 0 : src_nents;
1388                 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1389                 dst_nents = sg_nents_for_len(dst, dst_len);
1390                 if (dst_nents < 0) {
1391                         dev_err(dev, "Invalid number of dst SG.\n");
1392                         return ERR_PTR(-EINVAL);
1393                 }
1394                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1395         }
1396
1397         /*
1398          * allocate space for base edesc plus the link tables,
1399          * allowing for two separate entries for AD and generated ICV (+ 2),
1400          * and space for two sets of ICVs (stashed and generated)
1401          */
1402         alloc_len = sizeof(struct talitos_edesc);
1403         if (src_nents || dst_nents) {
1404                 if (is_sec1)
1405                         dma_len = (src_nents ? src_len : 0) +
1406                                   (dst_nents ? dst_len : 0);
1407                 else
1408                         dma_len = (src_nents + dst_nents + 2) *
1409                                   sizeof(struct talitos_ptr) + authsize * 2;
1410                 alloc_len += dma_len;
1411         } else {
1412                 dma_len = 0;
1413                 alloc_len += icv_stashing ? authsize : 0;
1414         }
1415
1416         /* if it's an ahash, add space for a second desc next to the first one */
1417         if (is_sec1 && !dst)
1418                 alloc_len += sizeof(struct talitos_desc);
1419         alloc_len += ivsize;
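             /*
              * room is reserved at the tail for a DMA-able copy of the IV;
              * the caller's IV buffer may not be safe to map (e.g. on the stack)
              */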
1420
1421         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1422         if (!edesc)
1423                 return ERR_PTR(-ENOMEM);
1424         if (ivsize) {
1425                 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1426                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1427         }
1428         memset(&edesc->desc, 0, sizeof(edesc->desc));
1429
1430         edesc->src_nents = src_nents;
1431         edesc->dst_nents = dst_nents;
1432         edesc->iv_dma = iv_dma;
1433         edesc->dma_len = dma_len;
1434         if (dma_len) {
1435                 void *addr = &edesc->link_tbl[0];
1436
1437                 if (is_sec1 && !dst)
1438                         addr += sizeof(struct talitos_desc);
1439                 edesc->dma_link_tbl = dma_map_single(dev, addr,
1440                                                      edesc->dma_len,
1441                                                      DMA_BIDIRECTIONAL);
1442         }
1443         return edesc;
1444 }
1445
1446 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1447                                               int icv_stashing, bool encrypt)
1448 {
1449         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1450         unsigned int authsize = crypto_aead_authsize(authenc);
1451         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1452         unsigned int ivsize = crypto_aead_ivsize(authenc);
1453
1454         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1455                                    iv, areq->assoclen, areq->cryptlen,
1456                                    authsize, ivsize, icv_stashing,
1457                                    areq->base.flags, encrypt);
1458 }
1459
1460 static int aead_encrypt(struct aead_request *req)
1461 {
1462         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1463         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1464         struct talitos_edesc *edesc;
1465
1466         /* allocate extended descriptor */
1467         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1468         if (IS_ERR(edesc))
1469                 return PTR_ERR(edesc);
1470
1471         /* set encrypt */
1472         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1473
1474         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1475 }
1476
1477 static int aead_decrypt(struct aead_request *req)
1478 {
1479         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1480         unsigned int authsize = crypto_aead_authsize(authenc);
1481         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1482         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1483         struct talitos_edesc *edesc;
1484         struct scatterlist *sg;
1485         void *icvdata;
1486
1487         req->cryptlen -= authsize;
1488
1489         /* allocate extended descriptor */
1490         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1491         if (IS_ERR(edesc))
1492                 return PTR_ERR(edesc);
1493
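             /*
              * the h/w ICV check is usable when the SEC supports it and either
              * the request fits in single segments (no link tables) or the
              * device can account for the ICV extent in its link table lengths
              */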
1494         if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1495             ((!edesc->src_nents && !edesc->dst_nents) ||
1496              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1497
1498                 /* decrypt and check the ICV */
1499                 edesc->desc.hdr = ctx->desc_hdr_template |
1500                                   DESC_HDR_DIR_INBOUND |
1501                                   DESC_HDR_MODE1_MDEU_CICV;
1502
1503                 /* reset integrity check result bits */
1504
1505                 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1506         }
1507
1508         /* Have to check the ICV with software */
1509         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1510
1511         /* stash incoming ICV for later comparison with the ICV generated by the h/w */
1512         if (edesc->dma_len)
1513                 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1514                                                    edesc->dst_nents + 2];
1515         else
1516                 icvdata = &edesc->link_tbl[0];
1517
1518         sg = sg_last(req->src, edesc->src_nents ? : 1);
1519
1520         memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1521
1522         return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1523 }
1524
1525 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1526                              const u8 *key, unsigned int keylen)
1527 {
1528         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1529         struct device *dev = ctx->dev;
1530         u32 tmp[DES_EXPKEY_WORDS];
1531
1532         if (keylen > TALITOS_MAX_KEY_SIZE) {
1533                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1534                 return -EINVAL;
1535         }
1536
1537         if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1538                      CRYPTO_TFM_REQ_WEAK_KEY) &&
1539             !des_ekey(tmp, key)) {
1540                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1541                 return -EINVAL;
1542         }
1543
1544         if (ctx->keylen)
1545                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1546
1547         memcpy(&ctx->key, key, keylen);
1548         ctx->keylen = keylen;
1549
1550         ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1551
1552         return 0;
1553 }
1554
1555 static void common_nonsnoop_unmap(struct device *dev,
1556                                   struct talitos_edesc *edesc,
1557                                   struct ablkcipher_request *areq)
1558 {
1559         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1560
1561         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1562         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1563
1564         if (edesc->dma_len)
1565                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1566                                  DMA_BIDIRECTIONAL);
1567 }
1568
1569 static void ablkcipher_done(struct device *dev,
1570                             struct talitos_desc *desc, void *context,
1571                             int err)
1572 {
1573         struct ablkcipher_request *areq = context;
1574         struct talitos_edesc *edesc;
1575
1576         edesc = container_of(desc, struct talitos_edesc, desc);
1577
1578         common_nonsnoop_unmap(dev, edesc, areq);
1579
1580         kfree(edesc);
1581
1582         areq->base.complete(&areq->base, err);
1583 }
1584
1585 static int common_nonsnoop(struct talitos_edesc *edesc,
1586                            struct ablkcipher_request *areq,
1587                            void (*callback) (struct device *dev,
1588                                              struct talitos_desc *desc,
1589                                              void *context, int error))
1590 {
1591         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1592         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1593         struct device *dev = ctx->dev;
1594         struct talitos_desc *desc = &edesc->desc;
1595         unsigned int cryptlen = areq->nbytes;
1596         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1597         int sg_count, ret;
1598         bool sync_needed = false;
1599         struct talitos_private *priv = dev_get_drvdata(dev);
1600         bool is_sec1 = has_ftr_sec1(priv);
1601
1602         /* first DWORD empty */
1603
1604         /* cipher iv */
1605         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1606
1607         /* cipher key */
1608         to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1609
1610         sg_count = edesc->src_nents ?: 1;
1611         if (is_sec1 && sg_count > 1)
1612                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1613                                   cryptlen);
1614         else
1615                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1616                                       (areq->src == areq->dst) ?
1617                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1618         /*
1619          * cipher in
1620          */
1621         sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1622                                   &desc->ptr[3], sg_count, 0, 0);
1623         if (sg_count > 1)
1624                 sync_needed = true;
1625
1626         /* cipher out */
1627         if (areq->src != areq->dst) {
1628                 sg_count = edesc->dst_nents ? : 1;
1629                 if (!is_sec1 || sg_count == 1)
1630                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1631         }
1632
1633         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1634                              sg_count, 0, (edesc->src_nents + 1));
1635         if (ret > 1)
1636                 sync_needed = true;
1637
1638         /* iv out */
1639         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1640                                DMA_FROM_DEVICE);
1641
1642         /* last DWORD empty */
1643
1644         if (sync_needed)
1645                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1646                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1647
1648         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1649         if (ret != -EINPROGRESS) {
1650                 common_nonsnoop_unmap(dev, edesc, areq);
1651                 kfree(edesc);
1652         }
1653         return ret;
1654 }
1655
1656 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1657                                                     areq, bool encrypt)
1658 {
1659         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1660         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1661         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1662
1663         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1664                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1665                                    areq->base.flags, encrypt);
1666 }
1667
1668 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1669 {
1670         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1671         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1672         struct talitos_edesc *edesc;
1673
1674         /* allocate extended descriptor */
1675         edesc = ablkcipher_edesc_alloc(areq, true);
1676         if (IS_ERR(edesc))
1677                 return PTR_ERR(edesc);
1678
1679         /* set encrypt */
1680         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1681
1682         return common_nonsnoop(edesc, areq, ablkcipher_done);
1683 }
1684
1685 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1686 {
1687         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1688         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1689         struct talitos_edesc *edesc;
1690
1691         /* allocate extended descriptor */
1692         edesc = ablkcipher_edesc_alloc(areq, false);
1693         if (IS_ERR(edesc))
1694                 return PTR_ERR(edesc);
1695
1696         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1697
1698         return common_nonsnoop(edesc, areq, ablkcipher_done);
1699 }
1700
1701 static void common_nonsnoop_hash_unmap(struct device *dev,
1702                                        struct talitos_edesc *edesc,
1703                                        struct ahash_request *areq)
1704 {
1705         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1706         struct talitos_private *priv = dev_get_drvdata(dev);
1707         bool is_sec1 = has_ftr_sec1(priv);
1708         struct talitos_desc *desc = &edesc->desc;
1709         struct talitos_desc *desc2 = desc + 1;
1710
1711         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1712         if (desc->next_desc &&
1713             desc->ptr[5].ptr != desc2->ptr[5].ptr)
1714                 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1715
1716         talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1717
1718         /* When using hashctx-in, must unmap it. */
1719         if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1720                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1721                                          DMA_TO_DEVICE);
1722         else if (desc->next_desc)
1723                 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1724                                          DMA_TO_DEVICE);
1725
1726         if (is_sec1 && req_ctx->nbuf)
1727                 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1728                                          DMA_TO_DEVICE);
1729
1730         if (edesc->dma_len)
1731                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1732                                  DMA_BIDIRECTIONAL);
1733
1734         if (edesc->desc.next_desc)
1735                 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1736                                  TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1737 }
1738
1739 static void ahash_done(struct device *dev,
1740                        struct talitos_desc *desc, void *context,
1741                        int err)
1742 {
1743         struct ahash_request *areq = context;
1744         struct talitos_edesc *edesc =
1745                  container_of(desc, struct talitos_edesc, desc);
1746         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1747
1748         if (!req_ctx->last && req_ctx->to_hash_later) {
1749                 /* Position any partial block for next update/final/finup */
1750                 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1751                 req_ctx->nbuf = req_ctx->to_hash_later;
1752         }
1753         common_nonsnoop_hash_unmap(dev, edesc, areq);
1754
1755         kfree(edesc);
1756
1757         areq->base.complete(&areq->base, err);
1758 }
1759
1760 /*
1761  * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1762  * ourselves and submit a pre-padded block
1763  */
1764 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1765                                struct talitos_edesc *edesc,
1766                                struct talitos_ptr *ptr)
1767 {
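             /*
              * one pre-padded, empty-message block: the 0x80 terminator
              * followed by zeros (the trailing length field stays zero)
              */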
1768         static u8 padded_hash[64] = {
1769                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1770                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1771                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1772                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773         };
1774
1775         pr_err_once("Bug in SEC1, padding ourself\n");
1776         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1777         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1778                                (char *)padded_hash, DMA_TO_DEVICE);
1779 }
1780
1781 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1782                                 struct ahash_request *areq, unsigned int length,
1783                                 unsigned int offset,
1784                                 void (*callback) (struct device *dev,
1785                                                   struct talitos_desc *desc,
1786                                                   void *context, int error))
1787 {
1788         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1789         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1790         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1791         struct device *dev = ctx->dev;
1792         struct talitos_desc *desc = &edesc->desc;
1793         int ret;
1794         bool sync_needed = false;
1795         struct talitos_private *priv = dev_get_drvdata(dev);
1796         bool is_sec1 = has_ftr_sec1(priv);
1797         int sg_count;
1798
1799         /* first DWORD empty */
1800
1801         /* hash context in */
1802         if (!req_ctx->first || req_ctx->swinit) {
1803                 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1804                                               req_ctx->hw_context_size,
1805                                               req_ctx->hw_context,
1806                                               DMA_TO_DEVICE);
1807                 req_ctx->swinit = 0;
1808         }
1809         /* Indicate next op is not the first. */
1810         req_ctx->first = 0;
1811
1812         /* HMAC key */
1813         if (ctx->keylen)
1814                 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1815                                is_sec1);
1816
1817         if (is_sec1 && req_ctx->nbuf)
1818                 length -= req_ctx->nbuf;
1819
1820         sg_count = edesc->src_nents ?: 1;
1821         if (is_sec1 && sg_count > 1)
1822                 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1823                                    edesc->buf + sizeof(struct talitos_desc),
1824                                    length, req_ctx->nbuf);
1825         else if (length)
1826                 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1827                                       DMA_TO_DEVICE);
1828         /*
1829          * data in
1830          */
1831         if (is_sec1 && req_ctx->nbuf) {
1832                 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1833                                        req_ctx->buf[req_ctx->buf_idx],
1834                                        DMA_TO_DEVICE);
1835         } else {
1836                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1837                                           &desc->ptr[3], sg_count, offset, 0);
1838                 if (sg_count > 1)
1839                         sync_needed = true;
1840         }
1841
1842         /* fifth DWORD empty */
1843
1844         /* hash/HMAC out -or- hash context out */
1845         if (req_ctx->last)
1846                 map_single_talitos_ptr(dev, &desc->ptr[5],
1847                                        crypto_ahash_digestsize(tfm),
1848                                        areq->result, DMA_FROM_DEVICE);
1849         else
1850                 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1851                                               req_ctx->hw_context_size,
1852                                               req_ctx->hw_context,
1853                                               DMA_FROM_DEVICE);
1854
1855         /* last DWORD empty */
1856
1857         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1858                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1859
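             /*
              * SEC1 with both buffered and fresh data: chain a second
              * descriptor (via next_desc) that continues the hash over the
              * unbuffered part of psrc
              */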
1860         if (is_sec1 && req_ctx->nbuf && length) {
1861                 struct talitos_desc *desc2 = desc + 1;
1862                 dma_addr_t next_desc;
1863
1864                 memset(desc2, 0, sizeof(*desc2));
1865                 desc2->hdr = desc->hdr;
1866                 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1867                 desc2->hdr1 = desc2->hdr;
1868                 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1869                 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1870                 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1871
1872                 if (desc->ptr[1].ptr)
1873                         copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1874                                          is_sec1);
1875                 else
1876                         map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1877                                                       req_ctx->hw_context_size,
1878                                                       req_ctx->hw_context,
1879                                                       DMA_TO_DEVICE);
1880                 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1881                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1882                                           &desc2->ptr[3], sg_count, offset, 0);
1883                 if (sg_count > 1)
1884                         sync_needed = true;
1885                 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1886                 if (req_ctx->last)
1887                         map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1888                                                       req_ctx->hw_context_size,
1889                                                       req_ctx->hw_context,
1890                                                       DMA_FROM_DEVICE);
1891
1892                 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1893                                            DMA_BIDIRECTIONAL);
1894                 desc->next_desc = cpu_to_be32(next_desc);
1895         }
1896
1897         if (sync_needed)
1898                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1899                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1900
1901         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1902         if (ret != -EINPROGRESS) {
1903                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1904                 kfree(edesc);
1905         }
1906         return ret;
1907 }
1908
1909 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1910                                                unsigned int nbytes)
1911 {
1912         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1913         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1914         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1915         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1916         bool is_sec1 = has_ftr_sec1(priv);
1917
1918         if (is_sec1)
1919                 nbytes -= req_ctx->nbuf;
1920
1921         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1922                                    nbytes, 0, 0, 0, areq->base.flags, false);
1923 }
1924
1925 static int ahash_init(struct ahash_request *areq)
1926 {
1927         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1928         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1929         struct device *dev = ctx->dev;
1930         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1931         unsigned int size;
1932         dma_addr_t dma;
1933
1934         /* Initialize the context */
1935         req_ctx->buf_idx = 0;
1936         req_ctx->nbuf = 0;
1937         req_ctx->first = 1; /* first indicates h/w must init its context */
1938         req_ctx->swinit = 0; /* assume h/w init of context */
1939         size =  (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1940                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1941                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1942         req_ctx->hw_context_size = size;
1943
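             /*
              * map/unmap once so the hw_context buffer is flushed from the
              * CPU caches before later _nosync mappings hand it to the SEC
              */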
1944         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1945                              DMA_TO_DEVICE);
1946         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1947
1948         return 0;
1949 }
1950
1951 /*
1952  * on h/w without explicit sha224 support, we initialize h/w context
1953  * manually with sha224 constants, and tell it to run sha256.
1954  */
1955 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1956 {
1957         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1958
1959         req_ctx->hw_context[0] = SHA224_H0;
1960         req_ctx->hw_context[1] = SHA224_H1;
1961         req_ctx->hw_context[2] = SHA224_H2;
1962         req_ctx->hw_context[3] = SHA224_H3;
1963         req_ctx->hw_context[4] = SHA224_H4;
1964         req_ctx->hw_context[5] = SHA224_H5;
1965         req_ctx->hw_context[6] = SHA224_H6;
1966         req_ctx->hw_context[7] = SHA224_H7;
1967
1968         /* init 64-bit count */
1969         req_ctx->hw_context[8] = 0;
1970         req_ctx->hw_context[9] = 0;
1971
1972         ahash_init(areq);
1973         req_ctx->swinit = 1; /* prevent h/w from initializing the context with sha256 values */
1974
1975         return 0;
1976 }
1977
1978 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1979 {
1980         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1981         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1982         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1983         struct talitos_edesc *edesc;
1984         unsigned int blocksize =
1985                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1986         unsigned int nbytes_to_hash;
1987         unsigned int to_hash_later;
1988         unsigned int nsg;
1989         int nents;
1990         struct device *dev = ctx->dev;
1991         struct talitos_private *priv = dev_get_drvdata(dev);
1992         bool is_sec1 = has_ftr_sec1(priv);
1993         int offset = 0;
1994         u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1995
1996         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1997                 /* Buffer up to one whole block */
1998                 nents = sg_nents_for_len(areq->src, nbytes);
1999                 if (nents < 0) {
2000                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2001                         return nents;
2002                 }
2003                 sg_copy_to_buffer(areq->src, nents,
2004                                   ctx_buf + req_ctx->nbuf, nbytes);
2005                 req_ctx->nbuf += nbytes;
2006                 return 0;
2007         }
2008
2009         /* At least (blocksize + 1) bytes are available to hash */
2010         nbytes_to_hash = nbytes + req_ctx->nbuf;
2011         to_hash_later = nbytes_to_hash & (blocksize - 1);
2012
2013         if (req_ctx->last)
2014                 to_hash_later = 0;
2015         else if (to_hash_later)
2016                 /* There is a partial block. Hash the full block(s) now */
2017                 nbytes_to_hash -= to_hash_later;
2018         else {
2019                 /* Keep one block buffered */
2020                 nbytes_to_hash -= blocksize;
2021                 to_hash_later = blocksize;
2022         }
2023
2024         /* Chain in any previously buffered data */
2025         if (!is_sec1 && req_ctx->nbuf) {
2026                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2027                 sg_init_table(req_ctx->bufsl, nsg);
2028                 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2029                 if (nsg > 1)
2030                         sg_chain(req_ctx->bufsl, 2, areq->src);
2031                 req_ctx->psrc = req_ctx->bufsl;
2032         } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
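                     /*
                      * SEC1: top up the buffered partial block from the head
                      * of the new data; the rest is hashed directly from
                      * areq->src (at 'offset') via the chained second
                      * descriptor built in common_nonsnoop_hash()
                      */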
2033                 if (nbytes_to_hash > blocksize)
2034                         offset = blocksize - req_ctx->nbuf;
2035                 else
2036                         offset = nbytes_to_hash - req_ctx->nbuf;
2037                 nents = sg_nents_for_len(areq->src, offset);
2038                 if (nents < 0) {
2039                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2040                         return nents;
2041                 }
2042                 sg_copy_to_buffer(areq->src, nents,
2043                                   ctx_buf + req_ctx->nbuf, offset);
2044                 req_ctx->nbuf += offset;
2045                 req_ctx->psrc = areq->src;
2046         } else
2047                 req_ctx->psrc = areq->src;
2048
2049         if (to_hash_later) {
2050                 nents = sg_nents_for_len(areq->src, nbytes);
2051                 if (nents < 0) {
2052                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2053                         return nents;
2054                 }
2055                 sg_pcopy_to_buffer(areq->src, nents,
2056                                    req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2057                                    to_hash_later,
2058                                    nbytes - to_hash_later);
2059         }
2060         req_ctx->to_hash_later = to_hash_later;
2061
2062         /* Allocate extended descriptor */
2063         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2064         if (IS_ERR(edesc))
2065                 return PTR_ERR(edesc);
2066
2067         edesc->desc.hdr = ctx->desc_hdr_template;
2068
2069         /* On last one, request SEC to pad; otherwise continue */
2070         if (req_ctx->last)
2071                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2072         else
2073                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2074
2075         /* request SEC to INIT hash. */
2076         if (req_ctx->first && !req_ctx->swinit)
2077                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2078
2079         /* When the tfm context has a keylen, it's an HMAC.
2080          * A first or last (i.e. not middle) descriptor must request HMAC.
2081          */
2082         if (ctx->keylen && (req_ctx->first || req_ctx->last))
2083                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2084
2085         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2086                                     ahash_done);
2087 }
2088
2089 static int ahash_update(struct ahash_request *areq)
2090 {
2091         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2092
2093         req_ctx->last = 0;
2094
2095         return ahash_process_req(areq, areq->nbytes);
2096 }
2097
2098 static int ahash_final(struct ahash_request *areq)
2099 {
2100         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2101
2102         req_ctx->last = 1;
2103
2104         return ahash_process_req(areq, 0);
2105 }
2106
2107 static int ahash_finup(struct ahash_request *areq)
2108 {
2109         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2110
2111         req_ctx->last = 1;
2112
2113         return ahash_process_req(areq, areq->nbytes);
2114 }
2115
2116 static int ahash_digest(struct ahash_request *areq)
2117 {
2118         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2119         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2120
2121         ahash->init(areq);
2122         req_ctx->last = 1;
2123
2124         return ahash_process_req(areq, areq->nbytes);
2125 }
2126
2127 static int ahash_export(struct ahash_request *areq, void *out)
2128 {
2129         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2130         struct talitos_export_state *export = out;
2131         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2132         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2133         struct device *dev = ctx->dev;
2134         dma_addr_t dma;
2135
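             /*
              * the map/unmap pair syncs the hw_context written back by the
              * SEC into the CPU's view before it is copied out
              */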
2136         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2137                              DMA_FROM_DEVICE);
2138         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2139
2140         memcpy(export->hw_context, req_ctx->hw_context,
2141                req_ctx->hw_context_size);
2142         memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2143         export->swinit = req_ctx->swinit;
2144         export->first = req_ctx->first;
2145         export->last = req_ctx->last;
2146         export->to_hash_later = req_ctx->to_hash_later;
2147         export->nbuf = req_ctx->nbuf;
2148
2149         return 0;
2150 }
2151
2152 static int ahash_import(struct ahash_request *areq, const void *in)
2153 {
2154         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2155         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2156         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2157         struct device *dev = ctx->dev;
2158         const struct talitos_export_state *export = in;
2159         unsigned int size;
2160         dma_addr_t dma;
2161
2162         memset(req_ctx, 0, sizeof(*req_ctx));
2163         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2164                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2165                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2166         req_ctx->hw_context_size = size;
2167         memcpy(req_ctx->hw_context, export->hw_context, size);
2168         memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2169         req_ctx->swinit = export->swinit;
2170         req_ctx->first = export->first;
2171         req_ctx->last = export->last;
2172         req_ctx->to_hash_later = export->to_hash_later;
2173         req_ctx->nbuf = export->nbuf;
2174
2175         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2176                              DMA_TO_DEVICE);
2177         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2178
2179         return 0;
2180 }
2181
2182 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2183                    u8 *hash)
2184 {
2185         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2186
2187         struct scatterlist sg[1];
2188         struct ahash_request *req;
2189         struct crypto_wait wait;
2190         int ret;
2191
2192         crypto_init_wait(&wait);
2193
2194         req = ahash_request_alloc(tfm, GFP_KERNEL);
2195         if (!req)
2196                 return -ENOMEM;
2197
2198         /* Keep tfm keylen == 0 during hash of the long key */
2199         ctx->keylen = 0;
2200         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2201                                    crypto_req_done, &wait);
2202
2203         sg_init_one(&sg[0], key, keylen);
2204
2205         ahash_request_set_crypt(req, sg, hash, keylen);
2206         ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2207
2208         ahash_request_free(req);
2209
2210         return ret;
2211 }
2212
2213 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2214                         unsigned int keylen)
2215 {
2216         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2217         struct device *dev = ctx->dev;
2218         unsigned int blocksize =
2219                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2220         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2221         unsigned int keysize = keylen;
2222         u8 hash[SHA512_DIGEST_SIZE];
2223         int ret;
2224
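             /*
              * per the HMAC convention, keys longer than the block size are
              * replaced by their digest
              */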
2225         if (keylen <= blocksize)
2226                 memcpy(ctx->key, key, keysize);
2227         else {
2228                 /* Must get the hash of the long key */
2229                 ret = keyhash(tfm, key, keylen, hash);
2230
2231                 if (ret) {
2232                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2233                         return -EINVAL;
2234                 }
2235
2236                 keysize = digestsize;
2237                 memcpy(ctx->key, hash, digestsize);
2238         }
2239
2240         if (ctx->keylen)
2241                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2242
2243         ctx->keylen = keysize;
2244         ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2245
2246         return 0;
2247 }
2248
2249
2250 struct talitos_alg_template {
2251         u32 type;
2252         u32 priority;
2253         union {
2254                 struct crypto_alg crypto;
2255                 struct ahash_alg hash;
2256                 struct aead_alg aead;
2257         } alg;
2258         __be32 desc_hdr_template;
2259 };
2260
2261 static struct talitos_alg_template driver_algs[] = {
2262         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
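             /*
              * Most AEADs appear twice: once with the single-pass IPSEC_ESP
              * descriptor type and once, at HSNA priority, with the
              * HMAC_SNOOP_NO_AFEU type, for parts where the IPSEC_ESP
              * variant is not available.
              */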
2263         {       .type = CRYPTO_ALG_TYPE_AEAD,
2264                 .alg.aead = {
2265                         .base = {
2266                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2267                                 .cra_driver_name = "authenc-hmac-sha1-"
2268                                                    "cbc-aes-talitos",
2269                                 .cra_blocksize = AES_BLOCK_SIZE,
2270                                 .cra_flags = CRYPTO_ALG_ASYNC,
2271                         },
2272                         .ivsize = AES_BLOCK_SIZE,
2273                         .maxauthsize = SHA1_DIGEST_SIZE,
2274                 },
2275                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2276                                      DESC_HDR_SEL0_AESU |
2277                                      DESC_HDR_MODE0_AESU_CBC |
2278                                      DESC_HDR_SEL1_MDEUA |
2279                                      DESC_HDR_MODE1_MDEU_INIT |
2280                                      DESC_HDR_MODE1_MDEU_PAD |
2281                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2282         },
2283         {       .type = CRYPTO_ALG_TYPE_AEAD,
2284                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2285                 .alg.aead = {
2286                         .base = {
2287                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2288                                 .cra_driver_name = "authenc-hmac-sha1-"
2289                                                    "cbc-aes-talitos",
2290                                 .cra_blocksize = AES_BLOCK_SIZE,
2291                                 .cra_flags = CRYPTO_ALG_ASYNC,
2292                         },
2293                         .ivsize = AES_BLOCK_SIZE,
2294                         .maxauthsize = SHA1_DIGEST_SIZE,
2295                 },
2296                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2297                                      DESC_HDR_SEL0_AESU |
2298                                      DESC_HDR_MODE0_AESU_CBC |
2299                                      DESC_HDR_SEL1_MDEUA |
2300                                      DESC_HDR_MODE1_MDEU_INIT |
2301                                      DESC_HDR_MODE1_MDEU_PAD |
2302                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2303         },
2304         {       .type = CRYPTO_ALG_TYPE_AEAD,
2305                 .alg.aead = {
2306                         .base = {
2307                                 .cra_name = "authenc(hmac(sha1),"
2308                                             "cbc(des3_ede))",
2309                                 .cra_driver_name = "authenc-hmac-sha1-"
2310                                                    "cbc-3des-talitos",
2311                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2312                                 .cra_flags = CRYPTO_ALG_ASYNC,
2313                         },
2314                         .ivsize = DES3_EDE_BLOCK_SIZE,
2315                         .maxauthsize = SHA1_DIGEST_SIZE,
2316                 },
2317                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2318                                      DESC_HDR_SEL0_DEU |
2319                                      DESC_HDR_MODE0_DEU_CBC |
2320                                      DESC_HDR_MODE0_DEU_3DES |
2321                                      DESC_HDR_SEL1_MDEUA |
2322                                      DESC_HDR_MODE1_MDEU_INIT |
2323                                      DESC_HDR_MODE1_MDEU_PAD |
2324                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2325         },
2326         {       .type = CRYPTO_ALG_TYPE_AEAD,
2327                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2328                 .alg.aead = {
2329                         .base = {
2330                                 .cra_name = "authenc(hmac(sha1),"
2331                                             "cbc(des3_ede))",
2332                                 .cra_driver_name = "authenc-hmac-sha1-"
2333                                                    "cbc-3des-talitos",
2334                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2335                                 .cra_flags = CRYPTO_ALG_ASYNC,
2336                         },
2337                         .ivsize = DES3_EDE_BLOCK_SIZE,
2338                         .maxauthsize = SHA1_DIGEST_SIZE,
2339                 },
2340                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2341                                      DESC_HDR_SEL0_DEU |
2342                                      DESC_HDR_MODE0_DEU_CBC |
2343                                      DESC_HDR_MODE0_DEU_3DES |
2344                                      DESC_HDR_SEL1_MDEUA |
2345                                      DESC_HDR_MODE1_MDEU_INIT |
2346                                      DESC_HDR_MODE1_MDEU_PAD |
2347                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2348         },
2349         {       .type = CRYPTO_ALG_TYPE_AEAD,
2350                 .alg.aead = {
2351                         .base = {
2352                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2353                                 .cra_driver_name = "authenc-hmac-sha224-"
2354                                                    "cbc-aes-talitos",
2355                                 .cra_blocksize = AES_BLOCK_SIZE,
2356                                 .cra_flags = CRYPTO_ALG_ASYNC,
2357                         },
2358                         .ivsize = AES_BLOCK_SIZE,
2359                         .maxauthsize = SHA224_DIGEST_SIZE,
2360                 },
2361                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2362                                      DESC_HDR_SEL0_AESU |
2363                                      DESC_HDR_MODE0_AESU_CBC |
2364                                      DESC_HDR_SEL1_MDEUA |
2365                                      DESC_HDR_MODE1_MDEU_INIT |
2366                                      DESC_HDR_MODE1_MDEU_PAD |
2367                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2368         },
2369         {       .type = CRYPTO_ALG_TYPE_AEAD,
2370                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2371                 .alg.aead = {
2372                         .base = {
2373                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2374                                 .cra_driver_name = "authenc-hmac-sha224-"
2375                                                    "cbc-aes-talitos",
2376                                 .cra_blocksize = AES_BLOCK_SIZE,
2377                                 .cra_flags = CRYPTO_ALG_ASYNC,
2378                         },
2379                         .ivsize = AES_BLOCK_SIZE,
2380                         .maxauthsize = SHA224_DIGEST_SIZE,
2381                 },
2382                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2383                                      DESC_HDR_SEL0_AESU |
2384                                      DESC_HDR_MODE0_AESU_CBC |
2385                                      DESC_HDR_SEL1_MDEUA |
2386                                      DESC_HDR_MODE1_MDEU_INIT |
2387                                      DESC_HDR_MODE1_MDEU_PAD |
2388                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2389         },
2390         {       .type = CRYPTO_ALG_TYPE_AEAD,
2391                 .alg.aead = {
2392                         .base = {
2393                                 .cra_name = "authenc(hmac(sha224),"
2394                                             "cbc(des3_ede))",
2395                                 .cra_driver_name = "authenc-hmac-sha224-"
2396                                                    "cbc-3des-talitos",
2397                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2398                                 .cra_flags = CRYPTO_ALG_ASYNC,
2399                         },
2400                         .ivsize = DES3_EDE_BLOCK_SIZE,
2401                         .maxauthsize = SHA224_DIGEST_SIZE,
2402                 },
2403                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2404                                      DESC_HDR_SEL0_DEU |
2405                                      DESC_HDR_MODE0_DEU_CBC |
2406                                      DESC_HDR_MODE0_DEU_3DES |
2407                                      DESC_HDR_SEL1_MDEUA |
2408                                      DESC_HDR_MODE1_MDEU_INIT |
2409                                      DESC_HDR_MODE1_MDEU_PAD |
2410                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2411         },
2412         {       .type = CRYPTO_ALG_TYPE_AEAD,
2413                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2414                 .alg.aead = {
2415                         .base = {
2416                                 .cra_name = "authenc(hmac(sha224),"
2417                                             "cbc(des3_ede))",
2418                                 .cra_driver_name = "authenc-hmac-sha224-"
2419                                                    "cbc-3des-talitos",
2420                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2421                                 .cra_flags = CRYPTO_ALG_ASYNC,
2422                         },
2423                         .ivsize = DES3_EDE_BLOCK_SIZE,
2424                         .maxauthsize = SHA224_DIGEST_SIZE,
2425                 },
2426                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2427                                      DESC_HDR_SEL0_DEU |
2428                                      DESC_HDR_MODE0_DEU_CBC |
2429                                      DESC_HDR_MODE0_DEU_3DES |
2430                                      DESC_HDR_SEL1_MDEUA |
2431                                      DESC_HDR_MODE1_MDEU_INIT |
2432                                      DESC_HDR_MODE1_MDEU_PAD |
2433                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2434         },
2435         {       .type = CRYPTO_ALG_TYPE_AEAD,
2436                 .alg.aead = {
2437                         .base = {
2438                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2439                                 .cra_driver_name = "authenc-hmac-sha256-"
2440                                                    "cbc-aes-talitos",
2441                                 .cra_blocksize = AES_BLOCK_SIZE,
2442                                 .cra_flags = CRYPTO_ALG_ASYNC,
2443                         },
2444                         .ivsize = AES_BLOCK_SIZE,
2445                         .maxauthsize = SHA256_DIGEST_SIZE,
2446                 },
2447                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2448                                      DESC_HDR_SEL0_AESU |
2449                                      DESC_HDR_MODE0_AESU_CBC |
2450                                      DESC_HDR_SEL1_MDEUA |
2451                                      DESC_HDR_MODE1_MDEU_INIT |
2452                                      DESC_HDR_MODE1_MDEU_PAD |
2453                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2454         },
2455         {       .type = CRYPTO_ALG_TYPE_AEAD,
2456                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2457                 .alg.aead = {
2458                         .base = {
2459                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2460                                 .cra_driver_name = "authenc-hmac-sha256-"
2461                                                    "cbc-aes-talitos",
2462                                 .cra_blocksize = AES_BLOCK_SIZE,
2463                                 .cra_flags = CRYPTO_ALG_ASYNC,
2464                         },
2465                         .ivsize = AES_BLOCK_SIZE,
2466                         .maxauthsize = SHA256_DIGEST_SIZE,
2467                 },
2468                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2469                                      DESC_HDR_SEL0_AESU |
2470                                      DESC_HDR_MODE0_AESU_CBC |
2471                                      DESC_HDR_SEL1_MDEUA |
2472                                      DESC_HDR_MODE1_MDEU_INIT |
2473                                      DESC_HDR_MODE1_MDEU_PAD |
2474                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2475         },
2476         {       .type = CRYPTO_ALG_TYPE_AEAD,
2477                 .alg.aead = {
2478                         .base = {
2479                                 .cra_name = "authenc(hmac(sha256),"
2480                                             "cbc(des3_ede))",
2481                                 .cra_driver_name = "authenc-hmac-sha256-"
2482                                                    "cbc-3des-talitos",
2483                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2484                                 .cra_flags = CRYPTO_ALG_ASYNC,
2485                         },
2486                         .ivsize = DES3_EDE_BLOCK_SIZE,
2487                         .maxauthsize = SHA256_DIGEST_SIZE,
2488                 },
2489                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2490                                      DESC_HDR_SEL0_DEU |
2491                                      DESC_HDR_MODE0_DEU_CBC |
2492                                      DESC_HDR_MODE0_DEU_3DES |
2493                                      DESC_HDR_SEL1_MDEUA |
2494                                      DESC_HDR_MODE1_MDEU_INIT |
2495                                      DESC_HDR_MODE1_MDEU_PAD |
2496                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2497         },
2498         {       .type = CRYPTO_ALG_TYPE_AEAD,
2499                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2500                 .alg.aead = {
2501                         .base = {
2502                                 .cra_name = "authenc(hmac(sha256),"
2503                                             "cbc(des3_ede))",
2504                                 .cra_driver_name = "authenc-hmac-sha256-"
2505                                                    "cbc-3des-talitos",
2506                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2507                                 .cra_flags = CRYPTO_ALG_ASYNC,
2508                         },
2509                         .ivsize = DES3_EDE_BLOCK_SIZE,
2510                         .maxauthsize = SHA256_DIGEST_SIZE,
2511                 },
2512                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2513                                      DESC_HDR_SEL0_DEU |
2514                                      DESC_HDR_MODE0_DEU_CBC |
2515                                      DESC_HDR_MODE0_DEU_3DES |
2516                                      DESC_HDR_SEL1_MDEUA |
2517                                      DESC_HDR_MODE1_MDEU_INIT |
2518                                      DESC_HDR_MODE1_MDEU_PAD |
2519                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2520         },
2521         {       .type = CRYPTO_ALG_TYPE_AEAD,
2522                 .alg.aead = {
2523                         .base = {
2524                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2525                                 .cra_driver_name = "authenc-hmac-sha384-"
2526                                                    "cbc-aes-talitos",
2527                                 .cra_blocksize = AES_BLOCK_SIZE,
2528                                 .cra_flags = CRYPTO_ALG_ASYNC,
2529                         },
2530                         .ivsize = AES_BLOCK_SIZE,
2531                         .maxauthsize = SHA384_DIGEST_SIZE,
2532                 },
2533                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2534                                      DESC_HDR_SEL0_AESU |
2535                                      DESC_HDR_MODE0_AESU_CBC |
2536                                      DESC_HDR_SEL1_MDEUB |
2537                                      DESC_HDR_MODE1_MDEU_INIT |
2538                                      DESC_HDR_MODE1_MDEU_PAD |
2539                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2540         },
2541         {       .type = CRYPTO_ALG_TYPE_AEAD,
2542                 .alg.aead = {
2543                         .base = {
2544                                 .cra_name = "authenc(hmac(sha384),"
2545                                             "cbc(des3_ede))",
2546                                 .cra_driver_name = "authenc-hmac-sha384-"
2547                                                    "cbc-3des-talitos",
2548                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2549                                 .cra_flags = CRYPTO_ALG_ASYNC,
2550                         },
2551                         .ivsize = DES3_EDE_BLOCK_SIZE,
2552                         .maxauthsize = SHA384_DIGEST_SIZE,
2553                 },
2554                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2555                                      DESC_HDR_SEL0_DEU |
2556                                      DESC_HDR_MODE0_DEU_CBC |
2557                                      DESC_HDR_MODE0_DEU_3DES |
2558                                      DESC_HDR_SEL1_MDEUB |
2559                                      DESC_HDR_MODE1_MDEU_INIT |
2560                                      DESC_HDR_MODE1_MDEU_PAD |
2561                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2562         },
2563         {       .type = CRYPTO_ALG_TYPE_AEAD,
2564                 .alg.aead = {
2565                         .base = {
2566                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2567                                 .cra_driver_name = "authenc-hmac-sha512-"
2568                                                    "cbc-aes-talitos",
2569                                 .cra_blocksize = AES_BLOCK_SIZE,
2570                                 .cra_flags = CRYPTO_ALG_ASYNC,
2571                         },
2572                         .ivsize = AES_BLOCK_SIZE,
2573                         .maxauthsize = SHA512_DIGEST_SIZE,
2574                 },
2575                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2576                                      DESC_HDR_SEL0_AESU |
2577                                      DESC_HDR_MODE0_AESU_CBC |
2578                                      DESC_HDR_SEL1_MDEUB |
2579                                      DESC_HDR_MODE1_MDEU_INIT |
2580                                      DESC_HDR_MODE1_MDEU_PAD |
2581                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2582         },
2583         {       .type = CRYPTO_ALG_TYPE_AEAD,
2584                 .alg.aead = {
2585                         .base = {
2586                                 .cra_name = "authenc(hmac(sha512),"
2587                                             "cbc(des3_ede))",
2588                                 .cra_driver_name = "authenc-hmac-sha512-"
2589                                                    "cbc-3des-talitos",
2590                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2591                                 .cra_flags = CRYPTO_ALG_ASYNC,
2592                         },
2593                         .ivsize = DES3_EDE_BLOCK_SIZE,
2594                         .maxauthsize = SHA512_DIGEST_SIZE,
2595                 },
2596                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2597                                      DESC_HDR_SEL0_DEU |
2598                                      DESC_HDR_MODE0_DEU_CBC |
2599                                      DESC_HDR_MODE0_DEU_3DES |
2600                                      DESC_HDR_SEL1_MDEUB |
2601                                      DESC_HDR_MODE1_MDEU_INIT |
2602                                      DESC_HDR_MODE1_MDEU_PAD |
2603                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2604         },
2605         {       .type = CRYPTO_ALG_TYPE_AEAD,
2606                 .alg.aead = {
2607                         .base = {
2608                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2609                                 .cra_driver_name = "authenc-hmac-md5-"
2610                                                    "cbc-aes-talitos",
2611                                 .cra_blocksize = AES_BLOCK_SIZE,
2612                                 .cra_flags = CRYPTO_ALG_ASYNC,
2613                         },
2614                         .ivsize = AES_BLOCK_SIZE,
2615                         .maxauthsize = MD5_DIGEST_SIZE,
2616                 },
2617                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2618                                      DESC_HDR_SEL0_AESU |
2619                                      DESC_HDR_MODE0_AESU_CBC |
2620                                      DESC_HDR_SEL1_MDEUA |
2621                                      DESC_HDR_MODE1_MDEU_INIT |
2622                                      DESC_HDR_MODE1_MDEU_PAD |
2623                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2624         },
2625         {       .type = CRYPTO_ALG_TYPE_AEAD,
2626                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2627                 .alg.aead = {
2628                         .base = {
2629                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2630                                 .cra_driver_name = "authenc-hmac-md5-"
2631                                                    "cbc-aes-talitos",
2632                                 .cra_blocksize = AES_BLOCK_SIZE,
2633                                 .cra_flags = CRYPTO_ALG_ASYNC,
2634                         },
2635                         .ivsize = AES_BLOCK_SIZE,
2636                         .maxauthsize = MD5_DIGEST_SIZE,
2637                 },
2638                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2639                                      DESC_HDR_SEL0_AESU |
2640                                      DESC_HDR_MODE0_AESU_CBC |
2641                                      DESC_HDR_SEL1_MDEUA |
2642                                      DESC_HDR_MODE1_MDEU_INIT |
2643                                      DESC_HDR_MODE1_MDEU_PAD |
2644                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2645         },
2646         {       .type = CRYPTO_ALG_TYPE_AEAD,
2647                 .alg.aead = {
2648                         .base = {
2649                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2650                                 .cra_driver_name = "authenc-hmac-md5-"
2651                                                    "cbc-3des-talitos",
2652                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2653                                 .cra_flags = CRYPTO_ALG_ASYNC,
2654                         },
2655                         .ivsize = DES3_EDE_BLOCK_SIZE,
2656                         .maxauthsize = MD5_DIGEST_SIZE,
2657                 },
2658                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2659                                      DESC_HDR_SEL0_DEU |
2660                                      DESC_HDR_MODE0_DEU_CBC |
2661                                      DESC_HDR_MODE0_DEU_3DES |
2662                                      DESC_HDR_SEL1_MDEUA |
2663                                      DESC_HDR_MODE1_MDEU_INIT |
2664                                      DESC_HDR_MODE1_MDEU_PAD |
2665                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2666         },
2667         {       .type = CRYPTO_ALG_TYPE_AEAD,
2668                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2669                 .alg.aead = {
2670                         .base = {
2671                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2672                                 .cra_driver_name = "authenc-hmac-md5-"
2673                                                    "cbc-3des-talitos",
2674                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2675                                 .cra_flags = CRYPTO_ALG_ASYNC,
2676                         },
2677                         .ivsize = DES3_EDE_BLOCK_SIZE,
2678                         .maxauthsize = MD5_DIGEST_SIZE,
2679                 },
2680                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2681                                      DESC_HDR_SEL0_DEU |
2682                                      DESC_HDR_MODE0_DEU_CBC |
2683                                      DESC_HDR_MODE0_DEU_3DES |
2684                                      DESC_HDR_SEL1_MDEUA |
2685                                      DESC_HDR_MODE1_MDEU_INIT |
2686                                      DESC_HDR_MODE1_MDEU_PAD |
2687                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2688         },
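             /*
              * Note: several AEAD entries above appear twice.  The second copy
              * is registered at the lower TALITOS_CRA_PRIORITY_AEAD_HSNA
              * priority and uses the HMAC_SNOOP_NO_AFEU descriptor type rather
              * than IPSEC_ESP; which copy ends up serving a request depends on
              * priority and on the descriptor types the probed hardware
              * reports (see hw_supports()).
              */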
2689         /* ABLKCIPHER algorithms. */
2690         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2691                 .alg.crypto = {
2692                         .cra_name = "ecb(aes)",
2693                         .cra_driver_name = "ecb-aes-talitos",
2694                         .cra_blocksize = AES_BLOCK_SIZE,
2695                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2696                                      CRYPTO_ALG_ASYNC,
2697                         .cra_ablkcipher = {
2698                                 .min_keysize = AES_MIN_KEY_SIZE,
2699                                 .max_keysize = AES_MAX_KEY_SIZE,
2701                         }
2702                 },
2703                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2704                                      DESC_HDR_SEL0_AESU,
2705         },
2706         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2707                 .alg.crypto = {
2708                         .cra_name = "cbc(aes)",
2709                         .cra_driver_name = "cbc-aes-talitos",
2710                         .cra_blocksize = AES_BLOCK_SIZE,
2711                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2712                                      CRYPTO_ALG_ASYNC,
2713                         .cra_ablkcipher = {
2714                                 .min_keysize = AES_MIN_KEY_SIZE,
2715                                 .max_keysize = AES_MAX_KEY_SIZE,
2716                                 .ivsize = AES_BLOCK_SIZE,
2717                         }
2718                 },
2719                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2720                                      DESC_HDR_SEL0_AESU |
2721                                      DESC_HDR_MODE0_AESU_CBC,
2722         },
2723         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2724                 .alg.crypto = {
2725                         .cra_name = "ctr(aes)",
2726                         .cra_driver_name = "ctr-aes-talitos",
2727                         .cra_blocksize = AES_BLOCK_SIZE,
2728                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2729                                      CRYPTO_ALG_ASYNC,
2730                         .cra_ablkcipher = {
2731                                 .min_keysize = AES_MIN_KEY_SIZE,
2732                                 .max_keysize = AES_MAX_KEY_SIZE,
2733                                 .ivsize = AES_BLOCK_SIZE,
2734                         }
2735                 },
2736                 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2737                                      DESC_HDR_SEL0_AESU |
2738                                      DESC_HDR_MODE0_AESU_CTR,
2739         },
2740         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2741                 .alg.crypto = {
2742                         .cra_name = "ecb(des)",
2743                         .cra_driver_name = "ecb-des-talitos",
2744                         .cra_blocksize = DES_BLOCK_SIZE,
2745                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2746                                      CRYPTO_ALG_ASYNC,
2747                         .cra_ablkcipher = {
2748                                 .min_keysize = DES_KEY_SIZE,
2749                                 .max_keysize = DES_KEY_SIZE,
2751                         }
2752                 },
2753                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2754                                      DESC_HDR_SEL0_DEU,
2755         },
2756         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2757                 .alg.crypto = {
2758                         .cra_name = "cbc(des)",
2759                         .cra_driver_name = "cbc-des-talitos",
2760                         .cra_blocksize = DES_BLOCK_SIZE,
2761                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2762                                      CRYPTO_ALG_ASYNC,
2763                         .cra_ablkcipher = {
2764                                 .min_keysize = DES_KEY_SIZE,
2765                                 .max_keysize = DES_KEY_SIZE,
2766                                 .ivsize = DES_BLOCK_SIZE,
2767                         }
2768                 },
2769                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2770                                      DESC_HDR_SEL0_DEU |
2771                                      DESC_HDR_MODE0_DEU_CBC,
2772         },
2773         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2774                 .alg.crypto = {
2775                         .cra_name = "ecb(des3_ede)",
2776                         .cra_driver_name = "ecb-3des-talitos",
2777                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2778                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2779                                      CRYPTO_ALG_ASYNC,
2780                         .cra_ablkcipher = {
2781                                 .min_keysize = DES3_EDE_KEY_SIZE,
2782                                 .max_keysize = DES3_EDE_KEY_SIZE,
2784                         }
2785                 },
2786                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2787                                      DESC_HDR_SEL0_DEU |
2788                                      DESC_HDR_MODE0_DEU_3DES,
2789         },
2790         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2791                 .alg.crypto = {
2792                         .cra_name = "cbc(des3_ede)",
2793                         .cra_driver_name = "cbc-3des-talitos",
2794                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2795                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2796                                      CRYPTO_ALG_ASYNC,
2797                         .cra_ablkcipher = {
2798                                 .min_keysize = DES3_EDE_KEY_SIZE,
2799                                 .max_keysize = DES3_EDE_KEY_SIZE,
2800                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2801                         }
2802                 },
2803                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2804                                      DESC_HDR_SEL0_DEU |
2805                                      DESC_HDR_MODE0_DEU_CBC |
2806                                      DESC_HDR_MODE0_DEU_3DES,
2807         },
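             /*
              * The ablkcipher templates map each cipher/mode pair onto a
              * single execution unit (AESU or DEU) through desc_hdr_template;
              * no secondary MDEU unit is selected for plain block ciphers.
              */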
2808         /* AHASH algorithms. */
2809         {       .type = CRYPTO_ALG_TYPE_AHASH,
2810                 .alg.hash = {
2811                         .halg.digestsize = MD5_DIGEST_SIZE,
2812                         .halg.statesize = sizeof(struct talitos_export_state),
2813                         .halg.base = {
2814                                 .cra_name = "md5",
2815                                 .cra_driver_name = "md5-talitos",
2816                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2817                                 .cra_flags = CRYPTO_ALG_ASYNC,
2818                         }
2819                 },
2820                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2821                                      DESC_HDR_SEL0_MDEUA |
2822                                      DESC_HDR_MODE0_MDEU_MD5,
2823         },
2824         {       .type = CRYPTO_ALG_TYPE_AHASH,
2825                 .alg.hash = {
2826                         .halg.digestsize = SHA1_DIGEST_SIZE,
2827                         .halg.statesize = sizeof(struct talitos_export_state),
2828                         .halg.base = {
2829                                 .cra_name = "sha1",
2830                                 .cra_driver_name = "sha1-talitos",
2831                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2832                                 .cra_flags = CRYPTO_ALG_ASYNC,
2833                         }
2834                 },
2835                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2836                                      DESC_HDR_SEL0_MDEUA |
2837                                      DESC_HDR_MODE0_MDEU_SHA1,
2838         },
2839         {       .type = CRYPTO_ALG_TYPE_AHASH,
2840                 .alg.hash = {
2841                         .halg.digestsize = SHA224_DIGEST_SIZE,
2842                         .halg.statesize = sizeof(struct talitos_export_state),
2843                         .halg.base = {
2844                                 .cra_name = "sha224",
2845                                 .cra_driver_name = "sha224-talitos",
2846                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2847                                 .cra_flags = CRYPTO_ALG_ASYNC,
2848                         }
2849                 },
2850                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2851                                      DESC_HDR_SEL0_MDEUA |
2852                                      DESC_HDR_MODE0_MDEU_SHA224,
2853         },
2854         {       .type = CRYPTO_ALG_TYPE_AHASH,
2855                 .alg.hash = {
2856                         .halg.digestsize = SHA256_DIGEST_SIZE,
2857                         .halg.statesize = sizeof(struct talitos_export_state),
2858                         .halg.base = {
2859                                 .cra_name = "sha256",
2860                                 .cra_driver_name = "sha256-talitos",
2861                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2862                                 .cra_flags = CRYPTO_ALG_ASYNC,
2863                         }
2864                 },
2865                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2866                                      DESC_HDR_SEL0_MDEUA |
2867                                      DESC_HDR_MODE0_MDEU_SHA256,
2868         },
2869         {       .type = CRYPTO_ALG_TYPE_AHASH,
2870                 .alg.hash = {
2871                         .halg.digestsize = SHA384_DIGEST_SIZE,
2872                         .halg.statesize = sizeof(struct talitos_export_state),
2873                         .halg.base = {
2874                                 .cra_name = "sha384",
2875                                 .cra_driver_name = "sha384-talitos",
2876                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2877                                 .cra_flags = CRYPTO_ALG_ASYNC,
2878                         }
2879                 },
2880                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881                                      DESC_HDR_SEL0_MDEUB |
2882                                      DESC_HDR_MODE0_MDEUB_SHA384,
2883         },
2884         {       .type = CRYPTO_ALG_TYPE_AHASH,
2885                 .alg.hash = {
2886                         .halg.digestsize = SHA512_DIGEST_SIZE,
2887                         .halg.statesize = sizeof(struct talitos_export_state),
2888                         .halg.base = {
2889                                 .cra_name = "sha512",
2890                                 .cra_driver_name = "sha512-talitos",
2891                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2892                                 .cra_flags = CRYPTO_ALG_ASYNC,
2893                         }
2894                 },
2895                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2896                                      DESC_HDR_SEL0_MDEUB |
2897                                      DESC_HDR_MODE0_MDEUB_SHA512,
2898         },
2899         {       .type = CRYPTO_ALG_TYPE_AHASH,
2900                 .alg.hash = {
2901                         .halg.digestsize = MD5_DIGEST_SIZE,
2902                         .halg.statesize = sizeof(struct talitos_export_state),
2903                         .halg.base = {
2904                                 .cra_name = "hmac(md5)",
2905                                 .cra_driver_name = "hmac-md5-talitos",
2906                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2907                                 .cra_flags = CRYPTO_ALG_ASYNC,
2908                         }
2909                 },
2910                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2911                                      DESC_HDR_SEL0_MDEUA |
2912                                      DESC_HDR_MODE0_MDEU_MD5,
2913         },
2914         {       .type = CRYPTO_ALG_TYPE_AHASH,
2915                 .alg.hash = {
2916                         .halg.digestsize = SHA1_DIGEST_SIZE,
2917                         .halg.statesize = sizeof(struct talitos_export_state),
2918                         .halg.base = {
2919                                 .cra_name = "hmac(sha1)",
2920                                 .cra_driver_name = "hmac-sha1-talitos",
2921                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2922                                 .cra_flags = CRYPTO_ALG_ASYNC,
2923                         }
2924                 },
2925                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926                                      DESC_HDR_SEL0_MDEUA |
2927                                      DESC_HDR_MODE0_MDEU_SHA1,
2928         },
2929         {       .type = CRYPTO_ALG_TYPE_AHASH,
2930                 .alg.hash = {
2931                         .halg.digestsize = SHA224_DIGEST_SIZE,
2932                         .halg.statesize = sizeof(struct talitos_export_state),
2933                         .halg.base = {
2934                                 .cra_name = "hmac(sha224)",
2935                                 .cra_driver_name = "hmac-sha224-talitos",
2936                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2937                                 .cra_flags = CRYPTO_ALG_ASYNC,
2938                         }
2939                 },
2940                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941                                      DESC_HDR_SEL0_MDEUA |
2942                                      DESC_HDR_MODE0_MDEU_SHA224,
2943         },
2944         {       .type = CRYPTO_ALG_TYPE_AHASH,
2945                 .alg.hash = {
2946                         .halg.digestsize = SHA256_DIGEST_SIZE,
2947                         .halg.statesize = sizeof(struct talitos_export_state),
2948                         .halg.base = {
2949                                 .cra_name = "hmac(sha256)",
2950                                 .cra_driver_name = "hmac-sha256-talitos",
2951                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2952                                 .cra_flags = CRYPTO_ALG_ASYNC,
2953                         }
2954                 },
2955                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956                                      DESC_HDR_SEL0_MDEUA |
2957                                      DESC_HDR_MODE0_MDEU_SHA256,
2958         },
2959         {       .type = CRYPTO_ALG_TYPE_AHASH,
2960                 .alg.hash = {
2961                         .halg.digestsize = SHA384_DIGEST_SIZE,
2962                         .halg.statesize = sizeof(struct talitos_export_state),
2963                         .halg.base = {
2964                                 .cra_name = "hmac(sha384)",
2965                                 .cra_driver_name = "hmac-sha384-talitos",
2966                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2967                                 .cra_flags = CRYPTO_ALG_ASYNC,
2968                         }
2969                 },
2970                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971                                      DESC_HDR_SEL0_MDEUB |
2972                                      DESC_HDR_MODE0_MDEUB_SHA384,
2973         },
2974         {       .type = CRYPTO_ALG_TYPE_AHASH,
2975                 .alg.hash = {
2976                         .halg.digestsize = SHA512_DIGEST_SIZE,
2977                         .halg.statesize = sizeof(struct talitos_export_state),
2978                         .halg.base = {
2979                                 .cra_name = "hmac(sha512)",
2980                                 .cra_driver_name = "hmac-sha512-talitos",
2981                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2982                                 .cra_flags = CRYPTO_ALG_ASYNC,
2983                         }
2984                 },
2985                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2986                                      DESC_HDR_SEL0_MDEUB |
2987                                      DESC_HDR_MODE0_MDEUB_SHA512,
2988         }
2989 };
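     /*
      * Illustrative only (not used by the driver): once registered, the
      * entries above are reachable through the regular kernel crypto API,
      * e.g.
      *
      *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
      *
      * which resolves to "sha256-talitos" whenever its cra_priority beats
      * the competing software implementation.
      */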
2990
2991 struct talitos_crypto_alg {
2992         struct list_head entry;
2993         struct device *dev;
2994         struct talitos_alg_template algt;
2995 };
2996
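     /*
      * Common per-tfm setup shared by the ablkcipher, AEAD and ahash init
      * paths: record the device, pick a SEC channel round-robin and seed the
      * descriptor header template.
      */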
2997 static int talitos_init_common(struct talitos_ctx *ctx,
2998                                struct talitos_crypto_alg *talitos_alg)
2999 {
3000         struct talitos_private *priv;
3001
3002         /* update context with ptr to dev */
3003         ctx->dev = talitos_alg->dev;
3004
3005         /* assign SEC channel to tfm in round-robin fashion */
3006         priv = dev_get_drvdata(ctx->dev);
3007         ctx->ch = atomic_inc_return(&priv->last_chan) &
3008                   (priv->num_channels - 1);
3009
3010         /* copy descriptor header template value */
3011         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3012
3013         /* select done notification */
3014         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3015
3016         return 0;
3017 }
3018
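     /* ->cra_init for ablkcipher/ahash: recover the enclosing talitos_crypto_alg */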
3019 static int talitos_cra_init(struct crypto_tfm *tfm)
3020 {
3021         struct crypto_alg *alg = tfm->__crt_alg;
3022         struct talitos_crypto_alg *talitos_alg;
3023         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3024
3025         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3026                 talitos_alg = container_of(__crypto_ahash_alg(alg),
3027                                            struct talitos_crypto_alg,
3028                                            algt.alg.hash);
3029         else
3030                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3031                                            algt.alg.crypto);
3032
3033         return talitos_init_common(ctx, talitos_alg);
3034 }
3035
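     /* ->init for AEAD transforms: same lookup via the aead_alg wrapper */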
3036 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3037 {
3038         struct aead_alg *alg = crypto_aead_alg(tfm);
3039         struct talitos_crypto_alg *talitos_alg;
3040         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3041
3042         talitos_alg = container_of(alg, struct talitos_crypto_alg,
3043                                    algt.alg.aead);
3044
3045         return talitos_init_common(ctx, talitos_alg);
3046 }
3047
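     /* ->cra_init for ahash: common init plus per-request context sizing */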
3048 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3049 {
3050         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3051
3052         talitos_cra_init(tfm);
3053
3054         ctx->keylen = 0;
3055         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3056                                  sizeof(struct talitos_ahash_req_ctx));
3057
3058         return 0;
3059 }
3060
3061 static void talitos_cra_exit(struct crypto_tfm *tfm)
3062 {
3063         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3064         struct device *dev = ctx->dev;
3065
3066         if (ctx->keylen)
3067                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3068 }
3069
3070 /*
3071  * given the alg's descriptor header template, determine whether descriptor
3072  * type and primary/secondary execution units required match the hw
3073  * capabilities description provided in the device tree node.
3074  */
3075 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3076 {
3077         struct talitos_private *priv = dev_get_drvdata(dev);
3078         int ret;
3079
3080         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3081               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3082
3083         if (SECONDARY_EU(desc_hdr_template))
3084                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3085                               & priv->exec_units);
3086
3087         return ret;
3088 }
3089
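     /*
      * Tear down in reverse order of probe: unregister the algorithms and the
      * hwrng, release the IRQ lines, then kill the completion tasklets.
      */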
3090 static int talitos_remove(struct platform_device *ofdev)
3091 {
3092         struct device *dev = &ofdev->dev;
3093         struct talitos_private *priv = dev_get_drvdata(dev);
3094         struct talitos_crypto_alg *t_alg, *n;
3095         int i;
3096
3097         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3098                 switch (t_alg->algt.type) {
3099                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
3100                         break;
3101                 case CRYPTO_ALG_TYPE_AEAD:
3102                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
3103                 case CRYPTO_ALG_TYPE_AHASH:
3104                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
3105                         break;
3106                 }
3107                 list_del(&t_alg->entry);
3108         }
3109
3110         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3111                 talitos_unregister_rng(dev);
3112
3113         for (i = 0; i < 2; i++)
3114                 if (priv->irq[i]) {
3115                         free_irq(priv->irq[i], dev);
3116                         irq_dispose_mapping(priv->irq[i]);
3117                 }
3118
3119         tasklet_kill(&priv->done_task[0]);
3120         if (priv->irq[1])
3121                 tasklet_kill(&priv->done_task[1]);
3122
3123         return 0;
3124 }
3125
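     /*
      * Instantiate a talitos_crypto_alg from a template: wire up the
      * type-specific operations and reject algorithms that the probed
      * hardware features cannot back (ERR_PTR(-ENOTSUPP) in that case).
      */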
3126 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3127                                                     struct talitos_alg_template
3128                                                            *template)
3129 {
3130         struct talitos_private *priv = dev_get_drvdata(dev);
3131         struct talitos_crypto_alg *t_alg;
3132         struct crypto_alg *alg;
3133
3134         t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3135                              GFP_KERNEL);
3136         if (!t_alg)
3137                 return ERR_PTR(-ENOMEM);
3138
3139         t_alg->algt = *template;
3140
3141         switch (t_alg->algt.type) {
3142         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3143                 alg = &t_alg->algt.alg.crypto;
3144                 alg->cra_init = talitos_cra_init;
3145                 alg->cra_exit = talitos_cra_exit;
3146                 alg->cra_type = &crypto_ablkcipher_type;
3147                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3148                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3149                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3150                 break;
3151         case CRYPTO_ALG_TYPE_AEAD:
3152                 alg = &t_alg->algt.alg.aead.base;
3153                 alg->cra_exit = talitos_cra_exit;
3154                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3155                 t_alg->algt.alg.aead.setkey = aead_setkey;
3156                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3157                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3158                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3159                     !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3160                         devm_kfree(dev, t_alg);
3161                         return ERR_PTR(-ENOTSUPP);
3162                 }
3163                 break;
3164         case CRYPTO_ALG_TYPE_AHASH:
3165                 alg = &t_alg->algt.alg.hash.halg.base;
3166                 alg->cra_init = talitos_cra_init_ahash;
3167                 alg->cra_exit = talitos_cra_exit;
3168                 t_alg->algt.alg.hash.init = ahash_init;
3169                 t_alg->algt.alg.hash.update = ahash_update;
3170                 t_alg->algt.alg.hash.final = ahash_final;
3171                 t_alg->algt.alg.hash.finup = ahash_finup;
3172                 t_alg->algt.alg.hash.digest = ahash_digest;
3173                 if (!strncmp(alg->cra_name, "hmac", 4))
3174                         t_alg->algt.alg.hash.setkey = ahash_setkey;
3175                 t_alg->algt.alg.hash.import = ahash_import;
3176                 t_alg->algt.alg.hash.export = ahash_export;
3177
3178                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3179                     !strncmp(alg->cra_name, "hmac", 4)) {
3180                         devm_kfree(dev, t_alg);
3181                         return ERR_PTR(-ENOTSUPP);
3182                 }
3183                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3184                     (!strcmp(alg->cra_name, "sha224") ||
3185                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
3186                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3187                         t_alg->algt.desc_hdr_template =
3188                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3189                                         DESC_HDR_SEL0_MDEUA |
3190                                         DESC_HDR_MODE0_MDEU_SHA256;
3191                 }
3192                 break;
3193         default:
3194                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3195                 devm_kfree(dev, t_alg);
3196                 return ERR_PTR(-EINVAL);
3197         }
3198
3199         alg->cra_module = THIS_MODULE;
3200         if (t_alg->algt.priority)
3201                 alg->cra_priority = t_alg->algt.priority;
3202         else
3203                 alg->cra_priority = TALITOS_CRA_PRIORITY;
3204         alg->cra_alignmask = 0;
3205         alg->cra_ctxsize = sizeof(struct talitos_ctx);
3206         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3207
3208         t_alg->dev = dev;
3209
3210         return t_alg;
3211 }
3212
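     /*
      * Map and request the SEC interrupt line(s).  SEC1 and single-IRQ SEC2+
      * parts use one handler for all channels; dual-IRQ parts split channels
      * 0/2 and 1/3 across the two lines.
      */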
3213 static int talitos_probe_irq(struct platform_device *ofdev)
3214 {
3215         struct device *dev = &ofdev->dev;
3216         struct device_node *np = ofdev->dev.of_node;
3217         struct talitos_private *priv = dev_get_drvdata(dev);
3218         int err;
3219         bool is_sec1 = has_ftr_sec1(priv);
3220
3221         priv->irq[0] = irq_of_parse_and_map(np, 0);
3222         if (!priv->irq[0]) {
3223                 dev_err(dev, "failed to map irq\n");
3224                 return -EINVAL;
3225         }
3226         if (is_sec1) {
3227                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3228                                   dev_driver_string(dev), dev);
3229                 goto primary_out;
3230         }
3231
3232         priv->irq[1] = irq_of_parse_and_map(np, 1);
3233
3234         /* get the primary irq line */
3235         if (!priv->irq[1]) {
3236                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3237                                   dev_driver_string(dev), dev);
3238                 goto primary_out;
3239         }
3240
3241         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3242                           dev_driver_string(dev), dev);
3243         if (err)
3244                 goto primary_out;
3245
3246         /* get the secondary irq line */
3247         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3248                           dev_driver_string(dev), dev);
3249         if (err) {
3250                 dev_err(dev, "failed to request secondary irq\n");
3251                 irq_dispose_mapping(priv->irq[1]);
3252                 priv->irq[1] = 0;
3253         }
3254
3255         return err;
3256
3257 primary_out:
3258         if (err) {
3259                 dev_err(dev, "failed to request primary irq\n");
3260                 irq_dispose_mapping(priv->irq[0]);
3261                 priv->irq[0] = 0;
3262         }
3263
3264         return err;
3265 }
3266
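     /*
      * Probe: map the register block, read capabilities and feature flags
      * from the device tree, set up IRQs, tasklets and per-channel request
      * fifos, reset the hardware, then register the hwrng and every
      * algorithm the device supports.
      */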
3267 static int talitos_probe(struct platform_device *ofdev)
3268 {
3269         struct device *dev = &ofdev->dev;
3270         struct device_node *np = ofdev->dev.of_node;
3271         struct talitos_private *priv;
3272         int i, err;
3273         int stride;
3274         struct resource *res;
3275
3276         priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3277         if (!priv)
3278                 return -ENOMEM;
3279
3280         INIT_LIST_HEAD(&priv->alg_list);
3281
3282         dev_set_drvdata(dev, priv);
3283
3284         priv->ofdev = ofdev;
3285
3286         spin_lock_init(&priv->reg_lock);
3287
3288         res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3289         if (!res)
3290                 return -ENXIO;
3291         priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3292         if (!priv->reg) {
3293                 dev_err(dev, "failed to ioremap\n");
3294                 err = -ENOMEM;
3295                 goto err_out;
3296         }
3297
3298         /* get SEC version capabilities from device tree */
3299         of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3300         of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3301         of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3302         of_property_read_u32(np, "fsl,descriptor-types-mask",
3303                              &priv->desc_types);
3304
3305         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3306             !priv->exec_units || !priv->desc_types) {
3307                 dev_err(dev, "invalid property data in device tree node\n");
3308                 err = -EINVAL;
3309                 goto err_out;
3310         }
3311
3312         if (of_device_is_compatible(np, "fsl,sec3.0"))
3313                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3314
3315         if (of_device_is_compatible(np, "fsl,sec2.1"))
3316                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3317                                   TALITOS_FTR_SHA224_HWINIT |
3318                                   TALITOS_FTR_HMAC_OK;
3319
3320         if (of_device_is_compatible(np, "fsl,sec1.0"))
3321                 priv->features |= TALITOS_FTR_SEC1;
3322
3323         if (of_device_is_compatible(np, "fsl,sec1.2")) {
3324                 priv->reg_deu = priv->reg + TALITOS12_DEU;
3325                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3326                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3327                 stride = TALITOS1_CH_STRIDE;
3328         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3329                 priv->reg_deu = priv->reg + TALITOS10_DEU;
3330                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3331                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3332                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3333                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3334                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3335                 stride = TALITOS1_CH_STRIDE;
3336         } else {
3337                 priv->reg_deu = priv->reg + TALITOS2_DEU;
3338                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3339                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3340                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3341                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3342                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3343                 priv->reg_keu = priv->reg + TALITOS2_KEU;
3344                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3345                 stride = TALITOS2_CH_STRIDE;
3346         }
3347
3348         err = talitos_probe_irq(ofdev);
3349         if (err)
3350                 goto err_out;
3351
3352         if (of_device_is_compatible(np, "fsl,sec1.0")) {
3353                 if (priv->num_channels == 1)
3354                         tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3355                                      (unsigned long)dev);
3356                 else
3357                         tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3358                                      (unsigned long)dev);
3359         } else {
3360                 if (priv->irq[1]) {
3361                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3362                                      (unsigned long)dev);
3363                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3364                                      (unsigned long)dev);
3365                 } else if (priv->num_channels == 1) {
3366                         tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3367                                      (unsigned long)dev);
3368                 } else {
3369                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3370                                      (unsigned long)dev);
3371                 }
3372         }
3373
3374         priv->chan = devm_kcalloc(dev,
3375                                   priv->num_channels,
3376                                   sizeof(struct talitos_channel),
3377                                   GFP_KERNEL);
3378         if (!priv->chan) {
3379                 dev_err(dev, "failed to allocate channel management space\n");
3380                 err = -ENOMEM;
3381                 goto err_out;
3382         }
3383
3384         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3385
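             /*
              * Map each channel's register window: channels sit at consecutive
              * strides past the global registers, with TALITOS_CH_BASE_OFFSET
              * added for every channel when only one IRQ is wired and for the
              * even-numbered channels otherwise.
              */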
3386         for (i = 0; i < priv->num_channels; i++) {
3387                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3388                 if (!priv->irq[1] || !(i & 1))
3389                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3390
3391                 spin_lock_init(&priv->chan[i].head_lock);
3392                 spin_lock_init(&priv->chan[i].tail_lock);
3393
3394                 priv->chan[i].fifo = devm_kcalloc(dev,
3395                                                 priv->fifo_len,
3396                                                 sizeof(struct talitos_request),
3397                                                 GFP_KERNEL);
3398                 if (!priv->chan[i].fifo) {
3399                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3400                         err = -ENOMEM;
3401                         goto err_out;
3402                 }
3403
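                     /*
                      * Start the submit counter at -(chfifo_len - 1) so the
                      * submit path can tell when the channel's hardware fifo
                      * is full.
                      */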
3404                 atomic_set(&priv->chan[i].submit_count,
3405                            -(priv->chfifo_len - 1));
3406         }
3407
3408         dma_set_mask(dev, DMA_BIT_MASK(36));
3409
3410         /* reset and initialize the h/w */
3411         err = init_device(dev);
3412         if (err) {
3413                 dev_err(dev, "failed to initialize device\n");
3414                 goto err_out;
3415         }
3416
3417         /* register the RNG, if available */
3418         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3419                 err = talitos_register_rng(dev);
3420                 if (err) {
3421                         dev_err(dev, "failed to register hwrng: %d\n", err);
3422                         goto err_out;
3423                 } else
3424                         dev_info(dev, "hwrng\n");
3425         }
3426
3427         /* register crypto algorithms the device supports */
3428         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3429                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3430                         struct talitos_crypto_alg *t_alg;
3431                         struct crypto_alg *alg = NULL;
3432
3433                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3434                         if (IS_ERR(t_alg)) {
3435                                 err = PTR_ERR(t_alg);
3436                                 if (err == -ENOTSUPP)
3437                                         continue;
3438                                 goto err_out;
3439                         }
3440
3441                         switch (t_alg->algt.type) {
3442                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3443                                 err = crypto_register_alg(
3444                                                 &t_alg->algt.alg.crypto);
3445                                 alg = &t_alg->algt.alg.crypto;
3446                                 break;
3447
3448                         case CRYPTO_ALG_TYPE_AEAD:
3449                                 err = crypto_register_aead(
3450                                         &t_alg->algt.alg.aead);
3451                                 alg = &t_alg->algt.alg.aead.base;
3452                                 break;
3453
3454                         case CRYPTO_ALG_TYPE_AHASH:
3455                                 err = crypto_register_ahash(
3456                                                 &t_alg->algt.alg.hash);
3457                                 alg = &t_alg->algt.alg.hash.halg.base;
3458                                 break;
3459                         }
3460                         if (err) {
3461                                 dev_err(dev, "%s alg registration failed\n",
3462                                         alg->cra_driver_name);
3463                                 devm_kfree(dev, t_alg);
3464                         } else
3465                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3466                 }
3467         }
3468         if (!list_empty(&priv->alg_list))
3469                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3470                          (char *)of_get_property(np, "compatible", NULL));
3471
3472         return 0;
3473
3474 err_out:
3475         talitos_remove(ofdev);
3476
3477         return err;
3478 }
3479
3480 static const struct of_device_id talitos_match[] = {
3481 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3482         {
3483                 .compatible = "fsl,sec1.0",
3484         },
3485 #endif
3486 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3487         {
3488                 .compatible = "fsl,sec2.0",
3489         },
3490 #endif
3491         {},
3492 };
3493 MODULE_DEVICE_TABLE(of, talitos_match);
3494
3495 static struct platform_driver talitos_driver = {
3496         .driver = {
3497                 .name = "talitos",
3498                 .of_match_table = talitos_match,
3499         },
3500         .probe = talitos_probe,
3501         .remove = talitos_remove,
3502 };
3503
3504 module_platform_driver(talitos_driver);
3505
3506 MODULE_LICENSE("GPL");
3507 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3508 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");