Merge tag 'for-linus' of git://github.com/openrisc/linux
[sfrench/cifs-2.6.git] / drivers / base / regmap / regmap-irq.c
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap based irq_chip
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/irqdomain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/regmap.h>
16 #include <linux/slab.h>
17
18 #include "internal.h"
19
/*
 * Runtime state for one regmap-backed interrupt controller instance.
 * Allocated by regmap_add_irq_chip_fwnode() and handed back to drivers
 * as an opaque handle.
 */
struct regmap_irq_chip_data {
        struct mutex lock;              /* serializes irq_chip bus ops vs. sync_unlock */
        struct irq_chip irq_chip;       /* per-instance copy of the template so ->name can differ */

        struct regmap *map;
        const struct regmap_irq_chip *chip;     /* static description from the driver */

        int irq_base;                   /* first allocated Linux IRQ, or 0 for dynamic mapping */
        struct irq_domain *domain;

        int irq;                        /* primary (parent) interrupt line */
        int wake_count;                 /* net wake enables to propagate to the parent on unlock */

        void *status_reg_buf;           /* raw val_bytes-sized buffer for bulk status reads */
        unsigned int *main_status_buf;  /* cached main (top-level) status registers */
        unsigned int *status_buf;       /* one entry per status register */
        unsigned int *mask_buf;         /* current mask state, written back on sync_unlock */
        unsigned int *mask_buf_def;     /* union of all irq masks per register (valid bits) */
        unsigned int *wake_buf;         /* wake-enable state, only if chip->wake_base is set */
        unsigned int *type_buf;         /* pending irq-type register values */
        unsigned int *type_buf_def;     /* valid type bits per type register */
        unsigned int **virt_buf;        /* [num_virt_regs][num_regs] chip-specific config */

        unsigned int irq_reg_stride;
        unsigned int type_reg_stride;

        bool clear_status:1;            /* status regs need a read-to-clear on next unlock */
};
48
49 static int sub_irq_reg(struct regmap_irq_chip_data *data,
50                        unsigned int base_reg, int i)
51 {
52         const struct regmap_irq_chip *chip = data->chip;
53         struct regmap *map = data->map;
54         struct regmap_irq_sub_irq_map *subreg;
55         unsigned int offset;
56         int reg = 0;
57
58         if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
59                 /* Assume linear mapping */
60                 reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
61         } else {
62                 subreg = &chip->sub_reg_offsets[i];
63                 offset = subreg->offset[0];
64                 reg = base_reg + offset;
65         }
66
67         return reg;
68 }
69
70 static inline const
71 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
72                                      int irq)
73 {
74         return &data->chip->irqs[irq];
75 }
76
77 static void regmap_irq_lock(struct irq_data *data)
78 {
79         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
80
81         mutex_lock(&d->lock);
82 }
83
84 static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
85                                   unsigned int reg, unsigned int mask,
86                                   unsigned int val)
87 {
88         if (d->chip->mask_writeonly)
89                 return regmap_write_bits(d->map, reg, mask, val);
90         else
91                 return regmap_update_bits(d->map, reg, mask, val);
92 }
93
/*
 * irq_bus_sync_unlock: flush all cached irq_chip state (masks, wake
 * enables, type and virt registers) to the hardware, then drop the
 * mutex taken in regmap_irq_lock().  Regmap's register cache is relied
 * on to suppress writes that change nothing.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, j, ret;
        u32 reg;
        u32 unmask_offset;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        /* clear_on_unmask chips: reading a status register clears it */
        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = sub_irq_reg(d, d->chip->status_base, i);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (!d->chip->mask_base)
                        continue;

                reg = sub_irq_reg(d, d->chip->mask_base, i);
                if (d->chip->mask_invert) {
                        /* Inverted sense: register bit 1 == enabled */
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], ~d->mask_buf[i]);
                } else if (d->chip->unmask_base) {
                        /* set mask with mask_base register */
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret < 0)
                                dev_err(d->map->dev,
                                        "Failed to sync unmasks in %x\n",
                                        reg);
                        unmask_offset = d->chip->unmask_base -
                                                        d->chip->mask_base;
                        /* clear mask with unmask_base register */
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf_def[i],
                                        d->mask_buf[i]);
                } else {
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], d->mask_buf[i]);
                }
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);

                /*
                 * NOTE(review): the wake register address is computed even
                 * when wake_base is unset; harmless as it is only used
                 * inside the if (d->wake_buf) branch below.
                 */
                reg = sub_irq_reg(d, d->chip->wake_base, i);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;
                /*
                 * Ack all the masked interrupts unconditionally,
                 * OR if there is masked interrupt which hasn't been Acked,
                 * it'll be ignored in irq handler, then may introduce irq storm
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = sub_irq_reg(d, d->chip->ack_base, i);

                        /* some chips ack by write 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        /* clear_ack: a second write returns the reg to idle */
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                           d->mask_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                           ~d->mask_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = sub_irq_reg(d, d->chip->type_base, i);
                        if (d->chip->type_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], ~d->type_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        /* Chip-specific "virtual" config registers, one bank per num_regs */
        if (d->chip->num_virt_regs) {
                for (i = 0; i < d->chip->num_virt_regs; i++) {
                        for (j = 0; j < d->chip->num_regs; j++) {
                                reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
                                                  j);
                                ret = regmap_write(map, reg, d->virt_buf[i][j]);
                                if (ret != 0)
                                        dev_err(d->map->dev,
                                                "Failed to write virt 0x%x: %d\n",
                                                reg, ret);
                        }
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}
251
252 static void regmap_irq_enable(struct irq_data *data)
253 {
254         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
255         struct regmap *map = d->map;
256         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
257         unsigned int mask, type;
258
259         type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
260
261         /*
262          * The type_in_mask flag means that the underlying hardware uses
263          * separate mask bits for rising and falling edge interrupts, but
264          * we want to make them into a single virtual interrupt with
265          * configurable edge.
266          *
267          * If the interrupt we're enabling defines the falling or rising
268          * masks then instead of using the regular mask bits for this
269          * interrupt, use the value previously written to the type buffer
270          * at the corresponding offset in regmap_irq_set_type().
271          */
272         if (d->chip->type_in_mask && type)
273                 mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
274         else
275                 mask = irq_data->mask;
276
277         if (d->chip->clear_on_unmask)
278                 d->clear_status = true;
279
280         d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
281 }
282
283 static void regmap_irq_disable(struct irq_data *data)
284 {
285         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
286         struct regmap *map = d->map;
287         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
288
289         d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
290 }
291
292 static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
293 {
294         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
295         struct regmap *map = d->map;
296         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
297         int reg;
298         const struct regmap_irq_type *t = &irq_data->type;
299
300         if ((t->types_supported & type) != type)
301                 return 0;
302
303         reg = t->type_reg_offset / map->reg_stride;
304
305         if (t->type_reg_mask)
306                 d->type_buf[reg] &= ~t->type_reg_mask;
307         else
308                 d->type_buf[reg] &= ~(t->type_falling_val |
309                                       t->type_rising_val |
310                                       t->type_level_low_val |
311                                       t->type_level_high_val);
312         switch (type) {
313         case IRQ_TYPE_EDGE_FALLING:
314                 d->type_buf[reg] |= t->type_falling_val;
315                 break;
316
317         case IRQ_TYPE_EDGE_RISING:
318                 d->type_buf[reg] |= t->type_rising_val;
319                 break;
320
321         case IRQ_TYPE_EDGE_BOTH:
322                 d->type_buf[reg] |= (t->type_falling_val |
323                                         t->type_rising_val);
324                 break;
325
326         case IRQ_TYPE_LEVEL_HIGH:
327                 d->type_buf[reg] |= t->type_level_high_val;
328                 break;
329
330         case IRQ_TYPE_LEVEL_LOW:
331                 d->type_buf[reg] |= t->type_level_low_val;
332                 break;
333         default:
334                 return -EINVAL;
335         }
336
337         if (d->chip->set_type_virt)
338                 return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
339                                               reg);
340
341         return 0;
342 }
343
344 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
345 {
346         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
347         struct regmap *map = d->map;
348         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
349
350         if (on) {
351                 if (d->wake_buf)
352                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
353                                 &= ~irq_data->mask;
354                 d->wake_count++;
355         } else {
356                 if (d->wake_buf)
357                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
358                                 |= irq_data->mask;
359                 d->wake_count--;
360         }
361
362         return 0;
363 }
364
/*
 * Template irq_chip; copied into each regmap_irq_chip_data so the
 * ->name can be set per instance.
 */
static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};
373
374 static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
375                                            unsigned int b)
376 {
377         const struct regmap_irq_chip *chip = data->chip;
378         struct regmap *map = data->map;
379         struct regmap_irq_sub_irq_map *subreg;
380         int i, ret = 0;
381
382         if (!chip->sub_reg_offsets) {
383                 /* Assume linear mapping */
384                 ret = regmap_read(map, chip->status_base +
385                                   (b * map->reg_stride * data->irq_reg_stride),
386                                    &data->status_buf[b]);
387         } else {
388                 subreg = &chip->sub_reg_offsets[b];
389                 for (i = 0; i < subreg->num_regs; i++) {
390                         unsigned int offset = subreg->offset[i];
391
392                         if (chip->not_fixed_stride)
393                                 ret = regmap_read(map,
394                                                 chip->status_base + offset,
395                                                 &data->status_buf[b]);
396                         else
397                                 ret = regmap_read(map,
398                                                 chip->status_base + offset,
399                                                 &data->status_buf[offset]);
400
401                         if (ret)
402                                 break;
403                 }
404         }
405         return ret;
406 }
407
/*
 * Threaded handler for the chip's primary interrupt: read the status
 * registers (via main-status fan-out, bulk read, or per-register
 * reads), ack what fired, then dispatch a nested virtual IRQ for every
 * unmasked pending source.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        /* Optional device-specific hook before any register I/O */
        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * Read only registers with active IRQs if the chip has 'main status
         * register'. Else read in the statuses, using a single bulk read if
         * possible in order to reduce the I/O overheads.
         */

        if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                 chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);

                /* We could support bulk read for main status registers
                 * but I don't expect to see devices with really many main
                 * status registers so let's only support single reads for the
                 * sake of simplicity. and add bulk reads only if needed
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        ret = regmap_read(map, chip->main_status +
                                  (i * map->reg_stride
                                   * data->irq_reg_stride),
                                  &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        /*
                         * NOTE(review): the bound uses '>', so the bit with
                         * index == max_main_bits is still processed; looks
                         * like an off-by-one -- confirm against chips that
                         * set num_main_status_bits.
                         */
                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }

                }
        } else if (!map->use_single_read && map->reg_stride == 1 &&
                   data->irq_reg_stride == 1) {

                /* Fast path: one bulk read covers every status register */
                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                /* Widen the raw register values into the unsigned int buf */
                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }

        } else {
                /* Slow path: one read per status register */
                for (i = 0; i < data->chip->num_regs; i++) {
                        unsigned int reg = sub_irq_reg(data,
                                        data->chip->status_base, i);
                        ret = regmap_read(map, reg, &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowleding the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = sub_irq_reg(data, data->chip->ack_base, i);

                        /* ack_invert: chips that ack by writing 0 */
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                data->status_buf[i]);
                        /* clear_ack: second write returns the reg to idle */
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                        data->status_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                        ~data->status_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Dispatch nested handlers for every pending, mapped source */
        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* Optional device-specific hook after all register I/O */
        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}
587
/*
 * irq_domain .map callback: wire a freshly created virq to this chip.
 * Handlers run nested from the parent's threaded handler, never in
 * hardirq context.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}
601
/* IRQ domain ops; one- and two-cell DT interrupt specifiers accepted. */
static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};
606
607 /**
608  * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
609  *
610  * @fwnode: The firmware node where the IRQ domain should be added to.
611  * @map: The regmap for the device.
612  * @irq: The IRQ the device uses to signal interrupts.
613  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
614  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
615  * @chip: Configuration for the interrupt controller.
616  * @data: Runtime data structure for the controller, allocated on success.
617  *
618  * Returns 0 on success or an errno on failure.
619  *
620  * In order for this to be efficient the chip really should use a
621  * register cache.  The chip driver is responsible for restoring the
622  * register values used by the IRQ controller over suspend and resume.
623  */
624 int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
625                                struct regmap *map, int irq,
626                                int irq_flags, int irq_base,
627                                const struct regmap_irq_chip *chip,
628                                struct regmap_irq_chip_data **data)
629 {
630         struct regmap_irq_chip_data *d;
631         int i;
632         int ret = -ENOMEM;
633         int num_type_reg;
634         u32 reg;
635         u32 unmask_offset;
636
637         if (chip->num_regs <= 0)
638                 return -EINVAL;
639
640         if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
641                 return -EINVAL;
642
643         for (i = 0; i < chip->num_irqs; i++) {
644                 if (chip->irqs[i].reg_offset % map->reg_stride)
645                         return -EINVAL;
646                 if (chip->irqs[i].reg_offset / map->reg_stride >=
647                     chip->num_regs)
648                         return -EINVAL;
649         }
650
651         if (chip->not_fixed_stride) {
652                 for (i = 0; i < chip->num_regs; i++)
653                         if (chip->sub_reg_offsets[i].num_regs != 1)
654                                 return -EINVAL;
655         }
656
657         if (irq_base) {
658                 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
659                 if (irq_base < 0) {
660                         dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
661                                  irq_base);
662                         return irq_base;
663                 }
664         }
665
666         d = kzalloc(sizeof(*d), GFP_KERNEL);
667         if (!d)
668                 return -ENOMEM;
669
670         if (chip->num_main_regs) {
671                 d->main_status_buf = kcalloc(chip->num_main_regs,
672                                              sizeof(unsigned int),
673                                              GFP_KERNEL);
674
675                 if (!d->main_status_buf)
676                         goto err_alloc;
677         }
678
679         d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
680                                 GFP_KERNEL);
681         if (!d->status_buf)
682                 goto err_alloc;
683
684         d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
685                               GFP_KERNEL);
686         if (!d->mask_buf)
687                 goto err_alloc;
688
689         d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
690                                   GFP_KERNEL);
691         if (!d->mask_buf_def)
692                 goto err_alloc;
693
694         if (chip->wake_base) {
695                 d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
696                                       GFP_KERNEL);
697                 if (!d->wake_buf)
698                         goto err_alloc;
699         }
700
701         num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
702         if (num_type_reg) {
703                 d->type_buf_def = kcalloc(num_type_reg,
704                                           sizeof(unsigned int), GFP_KERNEL);
705                 if (!d->type_buf_def)
706                         goto err_alloc;
707
708                 d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
709                                       GFP_KERNEL);
710                 if (!d->type_buf)
711                         goto err_alloc;
712         }
713
714         if (chip->num_virt_regs) {
715                 /*
716                  * Create virt_buf[chip->num_extra_config_regs][chip->num_regs]
717                  */
718                 d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
719                                       GFP_KERNEL);
720                 if (!d->virt_buf)
721                         goto err_alloc;
722
723                 for (i = 0; i < chip->num_virt_regs; i++) {
724                         d->virt_buf[i] = kcalloc(chip->num_regs,
725                                                  sizeof(unsigned int),
726                                                  GFP_KERNEL);
727                         if (!d->virt_buf[i])
728                                 goto err_alloc;
729                 }
730         }
731
732         d->irq_chip = regmap_irq_chip;
733         d->irq_chip.name = chip->name;
734         d->irq = irq;
735         d->map = map;
736         d->chip = chip;
737         d->irq_base = irq_base;
738
739         if (chip->irq_reg_stride)
740                 d->irq_reg_stride = chip->irq_reg_stride;
741         else
742                 d->irq_reg_stride = 1;
743
744         if (chip->type_reg_stride)
745                 d->type_reg_stride = chip->type_reg_stride;
746         else
747                 d->type_reg_stride = 1;
748
749         if (!map->use_single_read && map->reg_stride == 1 &&
750             d->irq_reg_stride == 1) {
751                 d->status_reg_buf = kmalloc_array(chip->num_regs,
752                                                   map->format.val_bytes,
753                                                   GFP_KERNEL);
754                 if (!d->status_reg_buf)
755                         goto err_alloc;
756         }
757
758         mutex_init(&d->lock);
759
760         for (i = 0; i < chip->num_irqs; i++)
761                 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
762                         |= chip->irqs[i].mask;
763
764         /* Mask all the interrupts by default */
765         for (i = 0; i < chip->num_regs; i++) {
766                 d->mask_buf[i] = d->mask_buf_def[i];
767                 if (!chip->mask_base)
768                         continue;
769
770                 reg = sub_irq_reg(d, d->chip->mask_base, i);
771
772                 if (chip->mask_invert)
773                         ret = regmap_irq_update_bits(d, reg,
774                                          d->mask_buf[i], ~d->mask_buf[i]);
775                 else if (d->chip->unmask_base) {
776                         unmask_offset = d->chip->unmask_base -
777                                         d->chip->mask_base;
778                         ret = regmap_irq_update_bits(d,
779                                         reg + unmask_offset,
780                                         d->mask_buf[i],
781                                         d->mask_buf[i]);
782                 } else
783                         ret = regmap_irq_update_bits(d, reg,
784                                          d->mask_buf[i], d->mask_buf[i]);
785                 if (ret != 0) {
786                         dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
787                                 reg, ret);
788                         goto err_alloc;
789                 }
790
791                 if (!chip->init_ack_masked)
792                         continue;
793
794                 /* Ack masked but set interrupts */
795                 reg = sub_irq_reg(d, d->chip->status_base, i);
796                 ret = regmap_read(map, reg, &d->status_buf[i]);
797                 if (ret != 0) {
798                         dev_err(map->dev, "Failed to read IRQ status: %d\n",
799                                 ret);
800                         goto err_alloc;
801                 }
802
803                 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
804                         reg = sub_irq_reg(d, d->chip->ack_base, i);
805                         if (chip->ack_invert)
806                                 ret = regmap_write(map, reg,
807                                         ~(d->status_buf[i] & d->mask_buf[i]));
808                         else
809                                 ret = regmap_write(map, reg,
810                                         d->status_buf[i] & d->mask_buf[i]);
811                         if (chip->clear_ack) {
812                                 if (chip->ack_invert && !ret)
813                                         ret = regmap_write(map, reg,
814                                                 (d->status_buf[i] &
815                                                  d->mask_buf[i]));
816                                 else if (!ret)
817                                         ret = regmap_write(map, reg,
818                                                 ~(d->status_buf[i] &
819                                                   d->mask_buf[i]));
820                         }
821                         if (ret != 0) {
822                                 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
823                                         reg, ret);
824                                 goto err_alloc;
825                         }
826                 }
827         }
828
829         /* Wake is disabled by default */
830         if (d->wake_buf) {
831                 for (i = 0; i < chip->num_regs; i++) {
832                         d->wake_buf[i] = d->mask_buf_def[i];
833                         reg = sub_irq_reg(d, d->chip->wake_base, i);
834
835                         if (chip->wake_invert)
836                                 ret = regmap_irq_update_bits(d, reg,
837                                                          d->mask_buf_def[i],
838                                                          0);
839                         else
840                                 ret = regmap_irq_update_bits(d, reg,
841                                                          d->mask_buf_def[i],
842                                                          d->wake_buf[i]);
843                         if (ret != 0) {
844                                 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
845                                         reg, ret);
846                                 goto err_alloc;
847                         }
848                 }
849         }
850
851         if (chip->num_type_reg && !chip->type_in_mask) {
852                 for (i = 0; i < chip->num_type_reg; ++i) {
853                         reg = sub_irq_reg(d, d->chip->type_base, i);
854
855                         ret = regmap_read(map, reg, &d->type_buf_def[i]);
856
857                         if (d->chip->type_invert)
858                                 d->type_buf_def[i] = ~d->type_buf_def[i];
859
860                         if (ret) {
861                                 dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
862                                         reg, ret);
863                                 goto err_alloc;
864                         }
865                 }
866         }
867
868         if (irq_base)
869                 d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
870                                                      irq_base, 0,
871                                                      &regmap_domain_ops, d);
872         else
873                 d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
874                                                      &regmap_domain_ops, d);
875         if (!d->domain) {
876                 dev_err(map->dev, "Failed to create IRQ domain\n");
877                 ret = -ENOMEM;
878                 goto err_alloc;
879         }
880
881         ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
882                                    irq_flags | IRQF_ONESHOT,
883                                    chip->name, d);
884         if (ret != 0) {
885                 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
886                         irq, chip->name, ret);
887                 goto err_domain;
888         }
889
890         *data = d;
891
892         return 0;
893
894 err_domain:
895         /* Should really dispose of the domain but... */
896 err_alloc:
897         kfree(d->type_buf);
898         kfree(d->type_buf_def);
899         kfree(d->wake_buf);
900         kfree(d->mask_buf_def);
901         kfree(d->mask_buf);
902         kfree(d->status_buf);
903         kfree(d->status_reg_buf);
904         if (d->virt_buf) {
905                 for (i = 0; i < chip->num_virt_regs; i++)
906                         kfree(d->virt_buf[i]);
907                 kfree(d->virt_buf);
908         }
909         kfree(d);
910         return ret;
911 }
912 EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
913
914 /**
915  * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
916  *
917  * @map: The regmap for the device.
918  * @irq: The IRQ the device uses to signal interrupts.
919  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
920  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
921  * @chip: Configuration for the interrupt controller.
922  * @data: Runtime data structure for the controller, allocated on success.
923  *
924  * Returns 0 on success or an errno on failure.
925  *
926  * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
927  * node of the regmap is used.
928  */
929 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
930                         int irq_base, const struct regmap_irq_chip *chip,
931                         struct regmap_irq_chip_data **data)
932 {
933         return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
934                                           irq_flags, irq_base, chip, data);
935 }
936 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
937
938 /**
939  * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
940  *
941  * @irq: Primary IRQ for the device
942  * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
943  *
944  * This function also disposes of all mapped IRQs on the chip.
945  */
946 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
947 {
948         unsigned int virq;
949         int hwirq;
950
951         if (!d)
952                 return;
953
954         free_irq(irq, d);
955
956         /* Dispose all virtual irq from irq domain before removing it */
957         for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
958                 /* Ignore hwirq if holes in the IRQ list */
959                 if (!d->chip->irqs[hwirq].mask)
960                         continue;
961
962                 /*
963                  * Find the virtual irq of hwirq on chip and if it is
964                  * there then dispose it
965                  */
966                 virq = irq_find_mapping(d->domain, hwirq);
967                 if (virq)
968                         irq_dispose_mapping(virq);
969         }
970
971         irq_domain_remove(d->domain);
972         kfree(d->type_buf);
973         kfree(d->type_buf_def);
974         kfree(d->wake_buf);
975         kfree(d->mask_buf_def);
976         kfree(d->mask_buf);
977         kfree(d->status_reg_buf);
978         kfree(d->status_buf);
979         kfree(d);
980 }
981 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
982
983 static void devm_regmap_irq_chip_release(struct device *dev, void *res)
984 {
985         struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
986
987         regmap_del_irq_chip(d->irq, d);
988 }
989
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        /* A devres entry without a payload indicates a bookkeeping bug. */
        if (WARN_ON(!r || !*r))
                return 0;

        return *r == data;
}
1001
1002 /**
1003  * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
1004  *
1005  * @dev: The device pointer on which irq_chip belongs to.
1006  * @fwnode: The firmware node where the IRQ domain should be added to.
1007  * @map: The regmap for the device.
1008  * @irq: The IRQ the device uses to signal interrupts
1009  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1010  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1011  * @chip: Configuration for the interrupt controller.
1012  * @data: Runtime data structure for the controller, allocated on success
1013  *
1014  * Returns 0 on success or an errno on failure.
1015  *
1016  * The &regmap_irq_chip_data will be automatically released when the device is
1017  * unbound.
1018  */
1019 int devm_regmap_add_irq_chip_fwnode(struct device *dev,
1020                                     struct fwnode_handle *fwnode,
1021                                     struct regmap *map, int irq,
1022                                     int irq_flags, int irq_base,
1023                                     const struct regmap_irq_chip *chip,
1024                                     struct regmap_irq_chip_data **data)
1025 {
1026         struct regmap_irq_chip_data **ptr, *d;
1027         int ret;
1028
1029         ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
1030                            GFP_KERNEL);
1031         if (!ptr)
1032                 return -ENOMEM;
1033
1034         ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
1035                                          chip, &d);
1036         if (ret < 0) {
1037                 devres_free(ptr);
1038                 return ret;
1039         }
1040
1041         *ptr = d;
1042         devres_add(dev, ptr);
1043         *data = d;
1044         return 0;
1045 }
1046 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
1047
1048 /**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
1050  *
1051  * @dev: The device pointer on which irq_chip belongs to.
1052  * @map: The regmap for the device.
1053  * @irq: The IRQ the device uses to signal interrupts
1054  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1055  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1056  * @chip: Configuration for the interrupt controller.
1057  * @data: Runtime data structure for the controller, allocated on success
1058  *
1059  * Returns 0 on success or an errno on failure.
1060  *
1061  * The &regmap_irq_chip_data will be automatically released when the device is
1062  * unbound.
1063  */
1064 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
1065                              int irq_flags, int irq_base,
1066                              const struct regmap_irq_chip *chip,
1067                              struct regmap_irq_chip_data **data)
1068 {
1069         return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
1070                                                irq, irq_flags, irq_base, chip,
1071                                                data);
1072 }
1073 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
1074
1075 /**
1076  * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
1077  *
 * @dev: Device for which the resource was allocated.
1079  * @irq: Primary IRQ for the device.
1080  * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1081  *
1082  * A resource managed version of regmap_del_irq_chip().
1083  */
1084 void devm_regmap_del_irq_chip(struct device *dev, int irq,
1085                               struct regmap_irq_chip_data *data)
1086 {
1087         int rc;
1088
1089         WARN_ON(irq != data->irq);
1090         rc = devres_release(dev, devm_regmap_irq_chip_release,
1091                             devm_regmap_irq_chip_match, data);
1092
1093         if (rc != 0)
1094                 WARN_ON(rc);
1095 }
1096 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1097
1098 /**
1099  * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
1100  *
1101  * @data: regmap irq controller to operate on.
1102  *
1103  * Useful for drivers to request their own IRQs.
1104  */
1105 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
1106 {
1107         WARN_ON(!data->irq_base);
1108         return data->irq_base;
1109 }
1110 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1111
1112 /**
1113  * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1114  *
1115  * @data: regmap irq controller to operate on.
1116  * @irq: index of the interrupt requested in the chip IRQs.
1117  *
1118  * Useful for drivers to request their own IRQs.
1119  */
1120 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
1121 {
1122         /* Handle holes in the IRQ list */
1123         if (!data->chip->irqs[irq].mask)
1124                 return -EINVAL;
1125
1126         return irq_create_mapping(data->domain, irq);
1127 }
1128 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1129
1130 /**
1131  * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
1132  *
1133  * @data: regmap_irq controller to operate on.
1134  *
1135  * Useful for drivers to request their own IRQs and for integration
1136  * with subsystems.  For ease of integration NULL is accepted as a
1137  * domain, allowing devices to just call this even if no domain is
1138  * allocated.
1139  */
1140 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1141 {
1142         if (data)
1143                 return data->domain;
1144         else
1145                 return NULL;
1146 }
1147 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);