/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Common code for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>

#include <asm/proc-fns.h>
#include <asm/exception.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/cacheflush.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>
#include <mach/pmu.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/spi-core.h>
#include <plat/regs-serial.h>

#include "common.h"
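
/*
 * Auxiliary control value/mask programmed into the outer L2 (L2X0) cache
 * controller by exynos4_l2x0_cache_init() below.
 */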
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";

static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);

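/*
 * SoC detection table: s3c_init_cpu(), called from exynos_init_io() below,
 * matches the chip ID read from the CHIPID block against idcode/idmask and
 * then uses the per-SoC map_io/init_clocks/init_uarts/init callbacks.
 */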
static struct cpu_table cpu_ids[] __initdata = {
        {
                .idcode         = EXYNOS4210_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4210,
        }, {
                .idcode         = EXYNOS4212_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4212,
        }, {
                .idcode         = EXYNOS4412_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4412,
        }, {
                .idcode         = EXYNOS5250_SOC_ID,
                .idmask         = EXYNOS5_SOC_MASK,
                .map_io         = exynos5_map_io,
                .init_clocks    = exynos5_init_clocks,
                .init_uarts     = exynos_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos5250,
        },
};

/* Initial IO mappings */

static struct map_desc exynos_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_CHIPID,
                .pfn            = __phys_to_pfn(EXYNOS_PA_CHIPID),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S3C_VA_SYS,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSCON),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_TIMER,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_TIMER),
                .length         = SZ_16K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_WATCHDOG,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SROMC,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SYSTIMER,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_PMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_PMU),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_COMBINER_BASE,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_COMBINER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_CPU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_DIST,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_UART,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_UART),
                .length         = SZ_512K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_CMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_CMU),
                .length         = SZ_128K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_COREPERI_BASE,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_COREPERI),
                .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_L2CC,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_L2CC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_DMC0,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC0),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_DMC1,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC1),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc0[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSRAM,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc1[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSRAM,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos5_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S3C_VA_SYS,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_SYSCON),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_TIMER,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_TIMER),
                .length         = SZ_16K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_WATCHDOG,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SROMC,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_SROMC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SYSTIMER,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SYSRAM,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_SYSRAM),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_CMU,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_CMU),
                .length         = 144 * SZ_1K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_PMU,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_PMU),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_COMBINER_BASE,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_COMBINER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_UART,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_UART),
                .length         = SZ_512K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_CPU,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
                .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_DIST,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

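/*
 * Both restart hooks trigger a software reset by writing 1 to the SoC's
 * software-reset register (S5P_SWRESET on EXYNOS4, EXYNOS_SWRESET on
 * EXYNOS5).
 */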
void exynos4_restart(char mode, const char *cmd)
{
        __raw_writel(0x1, S5P_SWRESET);
}

void exynos5_restart(char mode, const char *cmd)
{
        __raw_writel(0x1, EXYNOS_SWRESET);
}

void __init exynos_init_late(void)
{
        exynos_pm_late_initcall();
}

/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */

void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
        /* initialize the io descriptors we need for initialization */
        iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
        if (mach_desc)
                iotable_init(mach_desc, size);

        /* detect cpu id and rev. */
        s5p_init_cpu(S5P_VA_CHIPID);

        s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

static void __init exynos4_map_io(void)
{
        iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

        if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
                iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
        else
                iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

        /* initialize device information early */
        exynos4_default_sdhci0();
        exynos4_default_sdhci1();
        exynos4_default_sdhci2();
        exynos4_default_sdhci3();

        s3c_adc_setname("samsung-adc-v3");

        s3c_fimc_setname(0, "exynos4-fimc");
        s3c_fimc_setname(1, "exynos4-fimc");
        s3c_fimc_setname(2, "exynos4-fimc");
        s3c_fimc_setname(3, "exynos4-fimc");

        s3c_sdhci_setname(0, "exynos4-sdhci");
        s3c_sdhci_setname(1, "exynos4-sdhci");
        s3c_sdhci_setname(2, "exynos4-sdhci");
        s3c_sdhci_setname(3, "exynos4-sdhci");

        /* The I2C bus controllers are directly compatible with s3c2440 */
        s3c_i2c0_setname("s3c2440-i2c");
        s3c_i2c1_setname("s3c2440-i2c");
        s3c_i2c2_setname("s3c2440-i2c");

        s5p_fb_setname(0, "exynos4-fb");
        s5p_hdmi_setname("exynos4-hdmi");

        s3c64xx_spi_setname("exynos4210-spi");
}

static void __init exynos5_map_io(void)
{
        iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

        s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
        s3c_device_i2c0.resource[0].end   = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
        s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
        s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;

        s3c_sdhci_setname(0, "exynos4-sdhci");
        s3c_sdhci_setname(1, "exynos4-sdhci");
        s3c_sdhci_setname(2, "exynos4-sdhci");
        s3c_sdhci_setname(3, "exynos4-sdhci");

        /* The I2C bus controllers are directly compatible with s3c2440 */
        s3c_i2c0_setname("s3c2440-i2c");
        s3c_i2c1_setname("s3c2440-i2c");
        s3c_i2c2_setname("s3c2440-i2c");

        s3c64xx_spi_setname("exynos4210-spi");
}

static void __init exynos4_init_clocks(int xtal)
{
        printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

        s3c24xx_register_baseclocks(xtal);
        s5p_register_clocks(xtal);

        if (soc_is_exynos4210())
                exynos4210_register_clocks();
        else if (soc_is_exynos4212() || soc_is_exynos4412())
                exynos4212_register_clocks();

        exynos4_register_clocks();
        exynos4_setup_clocks();
}

static void __init exynos5_init_clocks(int xtal)
{
        printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

        s3c24xx_register_baseclocks(xtal);
        s5p_register_clocks(xtal);

        exynos5_register_clocks();
        exynos5_setup_clocks();
}

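/*
 * Interrupt combiner: each combiner ORs a group of peripheral interrupt
 * sources onto a single parent (GIC) interrupt. The offsets below address
 * the enable set/clear and status registers; four combiners share each
 * 0x10-byte register window (see combiner_init_one() below).
 */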
#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
        unsigned int irq_offset;
        unsigned int irq_mask;
        void __iomem *base;
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
        struct combiner_chip_data *combiner_data =
                irq_data_get_irq_chip_data(data);

        return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

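/*
 * Chained handler for a combiner's parent interrupt: read the group's status
 * word, mask it down to this combiner's byte and forward the first pending
 * source to its linux irq.
 */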
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, combiner_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
        spin_unlock(&irq_controller_lock);
        status &= chip_data->irq_mask;

        if (status == 0)
                goto out;

        combiner_irq = __ffs(status);

        cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
        if (unlikely(cascade_irq >= NR_IRQS))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
        .name           = "COMBINER",
        .irq_mask       = combiner_mask_irq,
        .irq_unmask     = combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
        unsigned int max_nr;

        if (soc_is_exynos5250())
                max_nr = EXYNOS5_MAX_COMBINER_NR;
        else
                max_nr = EXYNOS4_MAX_COMBINER_NR;

        if (combiner_nr >= max_nr)
                BUG();
        if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

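/*
 * Per-combiner setup: record the register window (four combiners per 0x10
 * block), the base linux irq of this combiner's sources and the byte of the
 * shared status/enable registers that belongs to it, then disable all of
 * its inputs until they are explicitly unmasked.
 */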
static void __init combiner_init_one(unsigned int combiner_nr,
                                     void __iomem *base)
{
        combiner_data[combiner_nr].base = base;
        combiner_data[combiner_nr].irq_offset = irq_find_mapping(
                combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
        combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

        /* Disable all interrupts */
        __raw_writel(combiner_data[combiner_nr].irq_mask,
                     base + COMBINER_ENABLE_CLEAR);
}

#ifdef CONFIG_OF
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;

        if (intsize < 2)
                return -EINVAL;

        *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
        *out_type = 0;

        return 0;
}
#else
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        return -EINVAL;
}
#endif

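/*
 * Domain map callback: the hardware number encodes <combiner, source>
 * (eight sources per combiner, hence the hw >> 3 below), so the shift
 * selects the owning combiner's chip data.
 */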
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                   irq_hw_number_t hw)
{
        irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
        irq_set_chip_data(irq, &combiner_data[hw >> 3]);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
        .xlate  = combiner_irq_domain_xlate,
        .map    = combiner_irq_domain_map,
};

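/*
 * Combiner setup: work out how many combiners the SoC (or the device tree)
 * provides, allocate irq descriptors and a legacy irq domain for them, then
 * initialize each combiner and cascade it onto its parent interrupt (a
 * fixed SPI in the non-DT case, or the interrupt parsed from the DT node).
 */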
static void __init combiner_init(void __iomem *combiner_base,
                                 struct device_node *np)
{
        int i, irq, irq_base;
        unsigned int max_nr, nr_irq;

        if (np) {
                if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
                        pr_warning("%s: number of combiners not specified, "
                                "setting default as %d.\n",
                                __func__, EXYNOS4_MAX_COMBINER_NR);
                        max_nr = EXYNOS4_MAX_COMBINER_NR;
                }
        } else {
                max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
                                                EXYNOS4_MAX_COMBINER_NR;
        }
        nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

        irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
        if (IS_ERR_VALUE(irq_base)) {
                irq_base = COMBINER_IRQ(0, 0);
                pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
        }

        combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
                                &combiner_irq_domain_ops, &combiner_data);
        if (WARN_ON(!combiner_irq_domain)) {
                pr_warning("%s: irq domain init failed\n", __func__);
                return;
        }

        for (i = 0; i < max_nr; i++) {
                combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
                irq = IRQ_SPI(i);
#ifdef CONFIG_OF
                if (np)
                        irq = irq_of_parse_and_map(np, i);
#endif
                combiner_cascade_irq(i, irq);
        }
}

#ifdef CONFIG_OF
int __init combiner_of_init(struct device_node *np, struct device_node *parent)
{
        void __iomem *combiner_base;

        combiner_base = of_iomap(np, 0);
        if (!combiner_base) {
                pr_err("%s: failed to map combiner registers\n", __func__);
                return -ENXIO;
        }

        combiner_init(combiner_base, np);

        return 0;
}

static const struct of_device_id exynos4_dt_irq_match[] = {
        { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
        { .compatible = "samsung,exynos4210-combiner",
                        .data = combiner_of_init, },
        {},
};
#endif

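/*
 * Primary interrupt controller setup. With a populated device tree the GIC
 * and the combiner are initialized through of_irq_init() and the match
 * table above; otherwise they are set up from the static register mappings.
 */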
void __init exynos4_init_irq(void)
{
        unsigned int gic_bank_offset;

        gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

        if (!of_have_populated_dt())
                gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
        else
                of_irq_init(exynos4_dt_irq_match);
#endif

        if (!of_have_populated_dt())
                combiner_init(S5P_VA_COMBINER_BASE, NULL);

        /*
         * The parameters of s5p_init_irq() are for VIC init.
         * These parameters should be NULL and 0 because EXYNOS4
         * uses GIC instead of VIC.
         */
        s5p_init_irq(NULL, 0);
}

void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
        of_irq_init(exynos4_dt_irq_match);
#endif
        /*
         * The parameters of s5p_init_irq() are for VIC init.
         * These parameters should be NULL and 0 because EXYNOS5
         * uses GIC instead of VIC.
         */
        s5p_init_irq(NULL, 0);
}

struct bus_type exynos_subsys = {
        .name           = "exynos-core",
        .dev_name       = "exynos-core",
};

static struct device exynos4_dev = {
        .bus    = &exynos_subsys,
};

static int __init exynos_core_init(void)
{
        return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);

#ifdef CONFIG_CACHE_L2X0
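/*
 * Outer (L2X0) cache setup for EXYNOS4. Try the DT-based l2x0_of_init()
 * path first; if that is not available and the controller has not already
 * been enabled by the bootloader, program the tag/data latency, prefetch
 * and power control registers directly, save the settings for restore
 * after suspend and finally enable the cache with the AUX value/mask above.
 */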
static int __init exynos4_l2x0_cache_init(void)
{
        int ret;

        if (soc_is_exynos5250())
                return 0;

        ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
        if (!ret) {
                l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
                clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
                return 0;
        }

        if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
                l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
                /* TAG, Data Latency Control: 2 cycles */
                l2x0_saved_regs.tag_latency = 0x110;

                if (soc_is_exynos4212() || soc_is_exynos4412())
                        l2x0_saved_regs.data_latency = 0x120;
                else
                        l2x0_saved_regs.data_latency = 0x110;

                l2x0_saved_regs.prefetch_ctrl = 0x30000007;
                l2x0_saved_regs.pwr_ctrl =
                        (L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

                l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

                __raw_writel(l2x0_saved_regs.tag_latency,
                                S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
                __raw_writel(l2x0_saved_regs.data_latency,
                                S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

                /* L2X0 Prefetch Control */
                __raw_writel(l2x0_saved_regs.prefetch_ctrl,
                                S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

                /* L2X0 Power Control */
                __raw_writel(l2x0_saved_regs.pwr_ctrl,
                                S5P_VA_L2CC + L2X0_POWER_CTRL);

                clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
                clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
        }

        l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
        return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif

static int __init exynos_init(void)
{
        printk(KERN_INFO "EXYNOS: Initializing architecture\n");

        return device_register(&exynos4_dev);
}

/* uart registration process */

static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
        struct s3c2410_uartcfg *tcfg = cfg;
        u32 ucnt;

        for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
                tcfg->has_fracval = 1;

        if (soc_is_exynos5250())
                s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
        else
                s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
}

static void __iomem *exynos_eint_base;

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

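/*
 * External interrupts EINT0..EINT31 live on the GPX0..GPX3 banks, eight
 * pins per bank; these helpers translate an EINT linux irq back to the
 * matching gpio so the pin can be switched to its interrupt function in
 * exynos_irq_eint_set_type().
 */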
static inline int exynos4_irq_to_gpio(unsigned int irq)
{
        if (irq < IRQ_EINT(0))
                return -EINVAL;

        irq -= IRQ_EINT(0);
        if (irq < 8)
                return EXYNOS4_GPX0(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS4_GPX1(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS4_GPX2(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS4_GPX3(irq);

        return -EINVAL;
}

static inline int exynos5_irq_to_gpio(unsigned int irq)
{
        if (irq < IRQ_EINT(0))
                return -EINVAL;

        irq -= IRQ_EINT(0);
        if (irq < 8)
                return EXYNOS5_GPX0(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS5_GPX1(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS5_GPX2(irq);

        irq -= 8;
        if (irq < 8)
                return EXYNOS5_GPX3(irq);

        return -EINVAL;
}

static unsigned int exynos4_eint0_15_src_int[16] = {
        EXYNOS4_IRQ_EINT0,
        EXYNOS4_IRQ_EINT1,
        EXYNOS4_IRQ_EINT2,
        EXYNOS4_IRQ_EINT3,
        EXYNOS4_IRQ_EINT4,
        EXYNOS4_IRQ_EINT5,
        EXYNOS4_IRQ_EINT6,
        EXYNOS4_IRQ_EINT7,
        EXYNOS4_IRQ_EINT8,
        EXYNOS4_IRQ_EINT9,
        EXYNOS4_IRQ_EINT10,
        EXYNOS4_IRQ_EINT11,
        EXYNOS4_IRQ_EINT12,
        EXYNOS4_IRQ_EINT13,
        EXYNOS4_IRQ_EINT14,
        EXYNOS4_IRQ_EINT15,
};

static unsigned int exynos5_eint0_15_src_int[16] = {
        EXYNOS5_IRQ_EINT0,
        EXYNOS5_IRQ_EINT1,
        EXYNOS5_IRQ_EINT2,
        EXYNOS5_IRQ_EINT3,
        EXYNOS5_IRQ_EINT4,
        EXYNOS5_IRQ_EINT5,
        EXYNOS5_IRQ_EINT6,
        EXYNOS5_IRQ_EINT7,
        EXYNOS5_IRQ_EINT8,
        EXYNOS5_IRQ_EINT9,
        EXYNOS5_IRQ_EINT10,
        EXYNOS5_IRQ_EINT11,
        EXYNOS5_IRQ_EINT12,
        EXYNOS5_IRQ_EINT13,
        EXYNOS5_IRQ_EINT14,
        EXYNOS5_IRQ_EINT15,
};
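
/*
 * irq_chip callbacks for the wakeup EINTs: mask/unmask and ack manipulate
 * the per-bank EINT mask and pending registers under eint_lock.
 */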
static inline void exynos_irq_eint_mask(struct irq_data *data)
{
        u32 mask;

        spin_lock(&eint_lock);
        mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
        mask |= EINT_OFFSET_BIT(data->irq);
        __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
        spin_unlock(&eint_lock);
}

static void exynos_irq_eint_unmask(struct irq_data *data)
{
        u32 mask;

        spin_lock(&eint_lock);
        mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
        mask &= ~(EINT_OFFSET_BIT(data->irq));
        __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
        spin_unlock(&eint_lock);
}

static inline void exynos_irq_eint_ack(struct irq_data *data)
{
        __raw_writel(EINT_OFFSET_BIT(data->irq),
                     EINT_PEND(exynos_eint_base, data->irq));
}

static void exynos_irq_eint_maskack(struct irq_data *data)
{
        exynos_irq_eint_mask(data);
        exynos_irq_eint_ack(data);
}

static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
        int offs = EINT_OFFSET(data->irq);
        int shift;
        u32 ctrl, mask;
        u32 newvalue = 0;

        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
                newvalue = S5P_IRQ_TYPE_EDGE_RISING;
                break;

        case IRQ_TYPE_EDGE_FALLING:
                newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
                break;

        default:
                printk(KERN_ERR "No such irq type %d\n", type);
                return -EINVAL;
        }

        shift = (offs & 0x7) * 4;
        mask = 0x7 << shift;

        spin_lock(&eint_lock);
        ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
        ctrl &= ~mask;
        ctrl |= newvalue << shift;
        __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
        spin_unlock(&eint_lock);

        if (soc_is_exynos5250())
                s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
        else
                s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));

        return 0;
}

static struct irq_chip exynos_irq_eint = {
        .name           = "exynos-eint",
        .irq_mask       = exynos_irq_eint_mask,
        .irq_unmask     = exynos_irq_eint_unmask,
        .irq_mask_ack   = exynos_irq_eint_maskack,
        .irq_ack        = exynos_irq_eint_ack,
        .irq_set_type   = exynos_irq_eint_set_type,
#ifdef CONFIG_PM
        .irq_set_wake   = s3c_irqext_wake,
#endif
};

/*
 * exynos_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos_irq_demux_eint(unsigned int start)
{
        unsigned int irq;

        u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
        u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));

        status &= ~mask;
        status &= 0xff;

        while (status) {
                irq = fls(status) - 1;
                generic_handle_irq(irq + start);
                status &= ~(1 << irq);
        }
}

static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_get_chip(irq);
        chained_irq_enter(chip, desc);
        exynos_irq_demux_eint(IRQ_EINT(16));
        exynos_irq_demux_eint(IRQ_EINT(24));
        chained_irq_exit(chip, desc);
}

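/*
 * EINT0..EINT15 each have a dedicated parent interrupt; the handler data
 * set up in exynos_init_irq_eint() holds the corresponding IRQ_EINT(x)
 * number, so this chained handler masks/acks the parent and forwards to it.
 */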
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
        u32 *irq_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);
        chip->irq_mask(&desc->irq_data);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        generic_handle_irq(*irq_data);

        chip->irq_unmask(&desc->irq_data);
        chained_irq_exit(chip, desc);
}

static int __init exynos_init_irq_eint(void)
{
        int irq;

#ifdef CONFIG_PINCTRL_SAMSUNG
        /*
         * The Samsung pinctrl driver provides integrated gpio/pinmux/pinconf
         * functionality along with support for external gpio and wakeup
         * interrupts. If the Samsung pinctrl driver is enabled and includes
         * wakeup interrupt support, then setting up the external wakeup
         * interrupts here can be skipped. This check is temporary, to allow
         * exynos4 platforms that do not use the Samsung pinctrl driver to
         * co-exist with platforms that do. When all Samsung Exynos4
         * platforms have switched over to the pinctrl driver, the wakeup
         * interrupt support code here can be removed completely.
         */
        struct device_node *pctrl_np, *wkup_np;
        const char *pctrl_compat = "samsung,pinctrl-exynos4210";
        const char *wkup_compat = "samsung,exynos4210-wakeup-eint";

        for_each_compatible_node(pctrl_np, NULL, pctrl_compat) {
                if (of_device_is_available(pctrl_np)) {
                        wkup_np = of_find_compatible_node(pctrl_np, NULL,
                                                        wkup_compat);
                        if (wkup_np)
                                return -ENODEV;
                }
        }
#endif

        if (soc_is_exynos5250())
                exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
        else
                exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

        if (exynos_eint_base == NULL) {
                pr_err("unable to ioremap for EINT base address\n");
                return -ENOMEM;
        }

        for (irq = 0 ; irq <= 31 ; irq++) {
                irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
                                         handle_level_irq);
                set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
        }

        irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);

        for (irq = 0 ; irq <= 15 ; irq++) {
                eint0_15_data[irq] = IRQ_EINT(irq);

                if (soc_is_exynos5250()) {
                        irq_set_handler_data(exynos5_eint0_15_src_int[irq],
                                             &eint0_15_data[irq]);
                        irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
                                                exynos_irq_eint0_15);
                } else {
                        irq_set_handler_data(exynos4_eint0_15_src_int[irq],
                                             &eint0_15_data[irq]);
                        irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
                                                exynos_irq_eint0_15);
                }
        }

        return 0;
}
arch_initcall(exynos_init_irq_eint);