/*
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR			"0.5"

/*
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_contol ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* set up PE DMA: hold SG, PDR and PE in reset, then release the reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* release the reset of PE, SG and PDR */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	/* the result descriptor ring shares the packet descriptor ring */
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size   = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable PE mode; keep PE, SG and PDR out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

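/*
 * Security Association (SA) management.
 *
 * Each tfm context keeps two SA buffers, one per direction (sa_in for
 * inbound/decrypt, sa_out for outbound/encrypt).  Note that the size
 * argument and the stored sa_len are counted in 32-bit words, which is
 * why every dma_alloc_coherent()/memcpy() below multiplies by 4.
 */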
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					&ctx->sa_in_dma_addr, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		/* free with the size just allocated, not a stale sa_len */
		dma_free_coherent(ctx->dev->core_dev->device,
				  size * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	memset(ctx->sa_in, 0, size * 4);
	memset(ctx->sa_out, 0, size * 4);
	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	if (ctx->sa_in != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
	if (ctx->sa_out != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_out, ctx->sa_out_dma_addr);

	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
				sizeof(struct sa_state_record),
				&ctx->state_record_dma_addr, GFP_ATOMIC);
	if (!ctx->state_record)
		return -ENOMEM;
	memset(ctx->state_record, 0, sizeof(struct sa_state_record));

	return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
	if (ctx->state_record != NULL)
		dma_free_coherent(ctx->dev->core_dev->device,
				  sizeof(struct sa_state_record),
				  ctx->state_record,
				  ctx->state_record_dma_addr);
	ctx->state_record = NULL;
	ctx->state_record_dma_addr = 0;
}

/*
 * Allocate memory for the Packet Descriptor Ring (PDR) and its
 * per-descriptor bookkeeping info (pdr_uinfo), along with the shadow
 * SA and shadow state-record pools that back each PD entry.
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;
	struct pd_uinfo *pd_uinfo;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   256 * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
						sizeof(struct pd_uinfo) * i);

		/* 256 bytes is enough for any kind of dynamic SA */
		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

		/* per-PD state record */
		pd_uinfo->sr_va = dev->shadow_sr_pool +
		    sizeof(struct sa_state_record) * i;
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
		    sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

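/*
 * Ring indexing convention used by all three rings (PDR, GDR, SDR):
 * entries are taken at the head and returned at the tail.  head == tail
 * means the ring is empty, so one slot always stays unused; the PDR
 * below is considered full when advancing the head would make it catch
 * up with the tail.
 */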
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo;
	unsigned long flags;

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * idx);
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
				       dma_addr_t *pd_dma, u32 idx)
{
	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

	return dev->pdr + sizeof(struct ce_pd) * idx;
}

/*
 * Allocate memory for the gather descriptor ring (GDR).
 * There is no need to allocate buffers for the ring entries;
 * gdr_tail, gdr_head and gdr_count are initialized by this function.
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * The caller must have preemption or interrupts disabled when
 * calling this function.
 */
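/*
 * The full-ring check below, worked through on an example: with
 * PPC4XX_NUM_GD == 256, gdr_head == 250, gdr_tail == 10 and n == 8,
 * tmp = (250 + 8) % 256 = 2.  Since head > tail, the new head is only
 * rejected if it wrapped into the used region [tail, head); here
 * 2 < 250 but 2 < 10, so the allocation succeeds and slots 250..255
 * and 0..1 are handed out.
 */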
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/*
 * Allocate memory for the scatter descriptor ring (SDR).
 * Unlike the gather ring, each entry needs its own buffer;
 * sdr_tail, sdr_head and sdr_count are initialized by this function.
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;
	struct ce_sd *sd_array;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			dev->scatter_buffer_size * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	sd_array = dev->sdr;

	/* point each SD at its slice of the scatter buffer */
	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		sd_array[i].ptr = dev->scatter_buffer_pa +
				  dev->scatter_buffer_size * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va != NULL)
		dma_free_coherent(dev->core_dev->device,
				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * The caller must have preemption or interrupts disabled when
 * calling this function.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

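/*
 * Copy one scatter buffer's worth of engine output back into the
 * destination page.  Three cases below: the remaining length spans
 * more than one scatter buffer (copy a full buffer and advance to the
 * next SD), fits inside the current buffer (copy and stay), or matches
 * the buffer boundary exactly (copy up to nbytes and advance).
 */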
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
				   dma_addr_t *addr, u32 *length,
				   u32 *idx, u32 *offset, u32 *nbytes)
{
	u32 len;

	if (*length > dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			dev->scatter_buffer_size);
		*offset = 0;
		*length -= dev->scatter_buffer_size;
		*nbytes -= dev->scatter_buffer_size;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;
		*addr = *addr + dev->scatter_buffer_size;
		return 1;
	} else if (*length < dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset, *length);
		if ((*offset + *length) == dev->scatter_buffer_size) {
			if (*idx == PPC4XX_LAST_SD)
				*idx = 0;
			else
				(*idx)++;
			*nbytes -= *length;
			*offset = 0;
		} else {
			*nbytes -= *length;
			*offset += *length;
		}

		return 0;
	} else {
		len = (*nbytes <= dev->scatter_buffer_size) ?
				(*nbytes) : dev->scatter_buffer_size;
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			len);
		*offset = 0;
		*nbytes -= len;

		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;

		return 0;
	}
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	dma_addr_t addr;
	u32 this_sd;
	u32 offset;
	u32 len;
	u32 i;
	u32 sg_len;
	struct scatterlist *sg;

	this_sd = pd_uinfo->first_sd;
	offset = 0;
	i = 0;

	while (nbytes) {
		sg = &dst[i];
		sg_len = sg->length;
		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);

		if (offset == 0) {
			len = (nbytes <= sg->length) ? nbytes : sg->length;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			i++;
		} else {
			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
				nbytes : (dev->scatter_buffer_size - offset);
			len = (sg->length < len) ? sg->length : len;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
					       &this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			sg_len -= len;
			if (sg_len) {
				addr += len;
				while (crypto4xx_fill_one_page(dev, &addr,
					&sg_len, &this_sd, &offset, &nbytes))
					;
			}
			i++;
		}
	}
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
					struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	struct sa_state_record *state_record =
				(struct sa_state_record *) pd_uinfo->sr_va;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}

	return 0;
}

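/*
 * Return the gather and scatter descriptors used by a completed PD to
 * their rings; 0xffffffff marks "no descriptor" in first_gd/first_sd.
 */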
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				     struct pd_uinfo *pd_uinfo,
				     struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	if (ablk_req->base.complete != NULL)
		ablk_req->base.complete(&ablk_req->base, 0);

	return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(pd_uinfo, ctx);
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	/* call the user-provided completion callback */
	if (ahash_req->base.complete != NULL)
		ahash_req->base.complete(&ahash_req->base, 0);

	return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd;
	struct pd_uinfo *pd_uinfo;

	pd = dev->pdr + sizeof(struct ce_pd) * idx;
	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
			CRYPTO_ALG_TYPE_ABLKCIPHER)
		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
	else
		return crypto4xx_ahash_done(dev, pd_uinfo);
}

/*
 * Note: Only use this function to copy items that are word-aligned.
 */
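/*
 * Example of the tail handling below: with len == 6 and input bytes
 * b0..b5, one full word is stored first, then the trailing two bytes
 * fall into "case 2" and the final destination word is written in
 * memory order as { 0, 0, b5, b4 }.
 */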
void crypto4xx_memcpy_le(unsigned int *dst,
			 const unsigned char *buf,
			 int len)
{
	u8 *tmp;

	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = cpu_to_le32(*(unsigned int *) buf);

	tmp = (u8 *)dst;
	switch (len) {
	case 3:
		*tmp++ = 0;
		*tmp++ = *(buf+2);
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 2:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 1:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *buf;
		break;
	default:
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	dev_set_drvdata(core_dev->device, NULL);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
			 u32 pd_entry, struct ce_pd *pd,
			 struct pd_uinfo *pd_uinfo)
{
	/* irqs should already be disabled */
	dev->pdr_head = pd_entry;
	pd->pd_ctl.w = 0;
	pd->pd_ctl_len.w = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * Derive the number of elements in a scatterlist.
 * Shamelessly copied from talitos.c.
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes) {
		sg_nents++;
		if (sg->length > nbytes)
			break;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}

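/*
 * Build one packet descriptor (PD) for a request: reserve consecutive
 * gather/scatter descriptors and a PD slot under the ring lock, point
 * the PD at the SA for the requested direction, walk the source
 * scatterlist into the gather array (or map it directly), wire up the
 * destination through the scatter ring when it is fragmented, and
 * finally kick the engine by writing CRYPTO4XX_INT_DESCR_RD.
 */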
u32 crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       unsigned int datalen,
		       void *iv, u32 iv_len)
{
	struct crypto4xx_device *dev = ctx->dev;
	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
	struct dynamic_sa_ctl *sa;
	struct scatterlist *sg;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo = NULL;
	unsigned int nbytes = datalen, idx;
	unsigned int ivlen = 0;
	u32 gd_idx = 0;

	/* figure out how many gather descriptors are needed */
	num_gd = get_sg_count(src, datalen);
	if (num_gd == 1)
		num_gd = 0;

	/* figure out how many scatter descriptors are needed */
	if (sg_is_last(dst) || ctx->is_hash) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code must run under the ring lock.
	 * The gather and scatter ring entries need to be consecutive;
	 * if we run out of any kind of descriptor, the descriptors
	 * already taken must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * pd_entry);
	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len || ctx->is_hash) {
		ivlen = iv_len;
		pd->sa = pd_uinfo->sa_pa;
		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
		if (ctx->direction == DIR_INBOUND)
			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
		else
			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

		memcpy((void *) sa + ctx->offset_to_sr_ptr,
			&pd_uinfo->sr_pa, 4);

		if (iv_len)
			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
	} else {
		if (ctx->direction == DIR_INBOUND) {
			pd->sa = ctx->sa_in_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
		} else {
			pd->sa = ctx->sa_out_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
		}
	}
	pd->sa_len = ctx->sa_len;
	if (num_gd) {
		/* get the first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		idx = 0;
		src = &src[0];
		/* walk the sg, and set up the gather array */
		while (nbytes) {
			sg = &src[idx];
			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				    sg->offset, sg->length, DMA_TO_DEVICE);
			gd->ptr = addr;
			gd->ctl_len.len = sg->length;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (sg->length >= nbytes)
				break;
			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			idx++;
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, src->length, DMA_TO_DEVICE);
		/* disable gather in the sa command */
		sa->sa_command_0.bf.gather = 0;
		/* indicate that the gather array is not used */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (ctx->is_hash || sg_is_last(dst)) {
		/*
		 * We know the application gave us a dst that is one whole
		 * contiguous piece of memory, so there is no need to use
		 * the scatter ring.  In the is_hash case, the ICV is
		 * always at the end of the src data.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		if (ctx->is_hash)
			pd->dest = virt_to_phys((void *)dst);
		else
			pd->dest = (u32)dma_map_page(dev->core_dev->device,
					sg_page(dst), dst->offset,
					dst->length, DMA_TO_DEVICE);
	} else {
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;

		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* set up the first scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr was already set up by crypto4xx_build_sdr */
		idx = 0;
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* set up the scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			else
				/*
				 * An SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so we are done.
				 */
				nbytes = 0;
		}
	}

	sa->sa_command_1.bf.hash_crypto_offset = 0;
	pd->pd_ctl.w = ctx->pd_ctl;
	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
	pd_uinfo->state = PD_ENTRY_INUSE;
	wmb();
	/* writing any value pushes the engine to read a pd */
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return -EINPROGRESS;
}


/*
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct crypto4xx_ctx));
		break;
	default:
		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
		break;
	}

	return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto4xx_free_sa(ctx);
	crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
			   struct crypto4xx_alg_common *crypto_alg,
			   int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_alg(&alg->alg.u.cipher);
			break;
		}

		if (rc) {
			/* the entry was never added to alg_list */
			kfree(alg);
		} else {
			list_add_tail(&alg->entry, &sec_dev->alg_list);
		}
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		default:
			crypto_unregister_alg(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

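/*
 * Bottom half: walk the PDR from the tail and complete every PD the
 * engine has marked done, stopping at the first one still in flight.
 */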
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail;

	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
		tail = core_dev->dev->pdr_tail;
		pd_uinfo = core_dev->dev->pdr_uinfo +
			sizeof(struct pd_uinfo) * tail;
		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
				   pd->pd_ctl.bf.pe_done &&
				   !pd->pd_ctl.bf.host_ready) {
			pd->pd_ctl.bf.pe_done = 0;
			crypto4xx_pd_done(core_dev->dev, tail);
			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
			pd_uinfo->state = PD_ENTRY_FREE;
		} else {
			/* the tail PD is not done yet; stop here */
			break;
		}
	}
}

/*
 * Top half of the ISR.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	if (core_dev->dev->ce_base == NULL)
		return IRQ_NONE;

	writel(PPC4XX_INTERRUPT_CLR,
	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

/*
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name	= "cbc(aes)",
		.cra_driver_name = "cbc-aes-ppc4xx",
		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
		.cra_type	= &crypto_ablkcipher_type,
		.cra_init	= crypto4xx_alg_init,
		.cra_exit	= crypto4xx_alg_exit,
		.cra_module	= THIS_MODULE,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cbc,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	}},
};

/*
 * Module Initialization Routine
 */
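/*
 * The probe below first pulses the crypto engine's reset bit in the
 * SDR0_SRST register via DCR: the bit is set to assert the reset and
 * then cleared to release the engine.  The bit position differs per
 * SoC (460EX, 405EX, 460SX), hence the compatible-string dispatch.
 */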
static int __init crypto4xx_probe(struct of_device *ofdev,
				  const struct of_device_id *match)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;

	rc = of_address_to_resource(ofdev->node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto function not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	if (!core_dev->dev) {
		rc = -ENOMEM;
		goto err_alloc_dev;
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_gdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 core_dev->dev->name, dev);
	if (rc)
		goto err_request_irq;

	core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* the PDR, RDR, GDR and SDR must be set up before this point */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
			       ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	return 0;

err_start_dev:
	iounmap(core_dev->dev->ce_base);
err_iomap:
	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);
	tasklet_kill(&core_dev->tasklet);
err_request_irq:
	crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
	crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int __exit crypto4xx_remove(struct of_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible      = "amcc,ppc4xx-crypto",},
	{ },
};

static struct of_platform_driver crypto4xx_driver = {
	.driver = {
		.name = "crypto4xx",
		.owner = THIS_MODULE,
		.of_match_table = crypto4xx_match,
	},
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

static int __init crypto4xx_init(void)
{
	return of_register_platform_driver(&crypto4xx_driver);
}

static void __exit crypto4xx_exit(void)
{
	of_unregister_platform_driver(&crypto4xx_driver);
}

module_init(crypto4xx_init);
module_exit(crypto4xx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");