arch/mips/lantiq/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR         0x0000
#define LTQ_ICU_IM0_IER         0x0008
#define LTQ_ICU_IM0_IOSR        0x0010
#define LTQ_ICU_IM0_IRSR        0x0018
#define LTQ_ICU_IM0_IMR         0x0020
#define LTQ_ICU_IM1_ISR         0x0028
#define LTQ_ICU_OFFSET          (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C          0x0000
#define LTQ_EIU_EXIN_INIC       0x0004
#define LTQ_EIU_EXIN_INC        0x0008
#define LTQ_EIU_EXIN_INEN       0x000C

/* number of external interrupts */
#define MAX_EIU                 6

/* the performance counter */
#define LTQ_PERF_IRQ            (INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ         22

#define ltq_icu_w32(m, x, y)    ltq_w32((x), ltq_icu_membase[m] + (y))
#define ltq_icu_r32(m, x)       ltq_r32(ltq_icu_membase[m] + (x))

#define ltq_eiu_w32(x, y)       ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)          ltq_r32(ltq_eiu_membase + (x))

/* our 2 ipi interrupts for VSMP */
#define MIPS_CPU_IPI_RESCHED_IRQ        0
#define MIPS_CPU_IPI_CALL_IRQ           1

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE            8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static int ltq_perfcount_irq;

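/*
 * return the ICU hwirq that external interrupt line @exin is wired to,
 * or -1 if no such line exists
 */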
int ltq_eiu_get_irq(int exin)
{
        if (exin < exin_avail)
                return ltq_eiu_irq[exin];
        return -1;
}

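/*
 * hwirqs 0-7 belong to the MIPS CPU irq controller; everything above is
 * split into interrupt modules of INT_NUM_IM_OFFSET irqs each, so the
 * hwirq is translated into a register bank index (im) and a bit offset
 */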
void ltq_disable_irq(struct irq_data *d)
{
        u32 ier = LTQ_ICU_IM0_IER;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        int im = offset / INT_NUM_IM_OFFSET;

        offset %= INT_NUM_IM_OFFSET;
        ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
        u32 ier = LTQ_ICU_IM0_IER;
        u32 isr = LTQ_ICU_IM0_ISR;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        int im = offset / INT_NUM_IM_OFFSET;

        offset %= INT_NUM_IM_OFFSET;
        ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
        ltq_icu_w32(im, BIT(offset), isr);
}

static void ltq_ack_irq(struct irq_data *d)
{
        u32 isr = LTQ_ICU_IM0_ISR;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        int im = offset / INT_NUM_IM_OFFSET;

        offset %= INT_NUM_IM_OFFSET;
        ltq_icu_w32(im, BIT(offset), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
        u32 ier = LTQ_ICU_IM0_IER;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        int im = offset / INT_NUM_IM_OFFSET;

        offset %= INT_NUM_IM_OFFSET;
        ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}

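/*
 * program the trigger mode of an external interrupt line; each line owns a
 * 4 bit field in the EXIN_C register, edge triggered lines additionally
 * switch to the edge irq flow handler
 */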
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
        int i;

        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        int val = 0;
                        int edge = 0;

                        switch (type) {
                        case IRQF_TRIGGER_NONE:
                                break;
                        case IRQF_TRIGGER_RISING:
                                val = 1;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_FALLING:
                                val = 2;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
                                val = 3;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_HIGH:
                                val = 5;
                                break;
                        case IRQF_TRIGGER_LOW:
                                val = 6;
                                break;
                        default:
                                pr_err("invalid type %d for irq %ld\n",
                                        type, d->hwirq);
                                return -EINVAL;
                        }

                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);

                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
                                (val << (i * 4)), LTQ_EIU_EXIN_C);
                }
        }

        return 0;
}

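/*
 * unmask the ICU line; external interrupt lines additionally default to
 * low level trigger, get any pending state cleared and are enabled in the EIU
 */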
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_enable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* by default we are low level triggered */
                        ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
                        /* clear all pending */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
                                LTQ_EIU_EXIN_INC);
                        /* enable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }

        return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_disable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* disable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }
}

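/* chip used for the internal ICU interrupts */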
static struct irq_chip ltq_irq_type = {
        .name = "icu",
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
};

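/* chip used for interrupt lines routed through the external interrupt unit */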
static struct irq_chip ltq_eiu_type = {
        .name = "eiu",
        .irq_startup = ltq_startup_eiu_irq,
        .irq_shutdown = ltq_shutdown_eiu_irq,
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
        .irq_set_type = ltq_eiu_settype,
};

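/*
 * chained handler for MIPS CPU irqs 2-7; each CPU irq cascades one ICU
 * interrupt module
 */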
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
        int module = irq_desc_get_irq(desc) - 2;
        u32 irq;
        int hwirq;

        irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
        if (irq == 0)
                return;

        /*
         * silicon bug causes only the msb set to 1 to be valid. all
         * other bits might be bogus
         */
        irq = __fls(irq);
        hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
        generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));

        /* if this is an EBU irq, we need to ack it or get a deadlock */
        if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
                ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
                        LTQ_EBU_PCC_ISTAT);
}

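/*
 * hwirqs below the cascade belong to the MIPS CPU irq controller; external
 * interrupt lines get the EIU chip, everything else the plain ICU chip
 */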
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
        struct irq_chip *chip = &ltq_irq_type;
        int i;

        if (hw < MIPS_CPU_IRQ_CASCADE)
                return 0;

        for (i = 0; i < exin_avail; i++)
                if (hw == ltq_eiu_irq[i])
                        chip = &ltq_eiu_type;

        irq_set_chip_and_handler(irq, chip, handle_level_irq);

        return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
        .xlate = irq_domain_xlate_onetwocell,
        .map = icu_map,
};

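/*
 * map the ICU register banks, mask and ack all irqs, hook up the cascade
 * handlers, register the linear irq domain and set up the optional EIU
 * found on xway SoCs
 */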
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
        struct device_node *eiu_node;
        struct resource res;
        int i, ret;

        for (i = 0; i < MAX_IM; i++) {
                if (of_address_to_resource(node, i, &res))
                        panic("Failed to get icu memory range");

                if (!request_mem_region(res.start, resource_size(&res),
                                        res.name))
                        pr_err("Failed to request icu memory");

                ltq_icu_membase[i] = ioremap_nocache(res.start,
                                        resource_size(&res));
                if (!ltq_icu_membase[i])
                        panic("Failed to remap icu memory");
        }

        /* turn off all irqs by default */
        for (i = 0; i < MAX_IM; i++) {
                /* make sure all irqs are turned off by default */
                ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
                /* clear all possibly pending interrupts */
                ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
        }

        mips_cpu_irq_init();

        for (i = 0; i < MAX_IM; i++)
                irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

        ltq_domain = irq_domain_add_linear(node,
                (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
                &irq_domain_ops, 0);

        /* tell oprofile which irq to use */
        ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

        /* the external interrupts are optional and xway only */
        eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
        if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
                /* find out how many external irq sources we have */
                exin_avail = of_property_count_u32_elems(eiu_node,
                                                         "lantiq,eiu-irqs");

                if (exin_avail > MAX_EIU)
                        exin_avail = MAX_EIU;

                ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
                                                ltq_eiu_irq, exin_avail);
                if (ret)
                        panic("failed to load external irq resources");

                if (!request_mem_region(res.start, resource_size(&res),
                                                        res.name))
                        pr_err("Failed to request eiu memory");

                ltq_eiu_membase = ioremap_nocache(res.start,
                                                        resource_size(&res));
                if (!ltq_eiu_membase)
                        panic("Failed to remap eiu memory");
        }

        return 0;
}

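/* report which irq line the performance counters are wired to */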
int get_c0_perfcount_int(void)
{
        return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
        return CP0_LEGACY_COMPARE_IRQ;
}

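/* match table handed to of_irq_init() to probe the ICU from the devicetree */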
static struct of_device_id __initdata of_irq_ids[] = {
        { .compatible = "lantiq,icu", .data = icu_of_init },
        {},
};

void __init arch_init_irq(void)
{
        of_irq_init(of_irq_ids);
}