Merge tag 'x86-entry-2021-06-29' of git://git.kernel.org/pub/scm/linux/kernel/git...
[sfrench/cifs-2.6.git] / drivers / base / regmap / regmap-irq.c
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap based irq_chip
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/irqdomain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/regmap.h>
16 #include <linux/slab.h>
17
18 #include "internal.h"
19
/*
 * Runtime state for one registered regmap-based IRQ controller
 * instance, allocated by regmap_add_irq_chip_fwnode().
 */
struct regmap_irq_chip_data {
        struct mutex lock;              /* held from irq_bus_lock until irq_bus_sync_unlock */
        struct irq_chip irq_chip;       /* per-instance copy of the regmap_irq_chip template */

        struct regmap *map;             /* regmap used for all register I/O */
        const struct regmap_irq_chip *chip;     /* static chip description from the driver */

        int irq_base;                   /* base of allocated IRQ descriptors, if requested */
        struct irq_domain *domain;      /* domain mapping hwirq numbers to virqs */

        int irq;                        /* primary (parent) interrupt line */
        int wake_count;                 /* pending wake enable/disable delta, flushed on sync_unlock */

        void *status_reg_buf;           /* raw-width scratch buffer for bulk status reads */
        unsigned int *main_status_buf;  /* cached main (top-level) status register values */
        unsigned int *status_buf;       /* per-register decoded status values */
        unsigned int *mask_buf;         /* per-register mask state to be written to hardware */
        unsigned int *mask_buf_def;     /* all valid mask bits for each register */
        unsigned int *wake_buf;         /* per-register wake mask state (NULL if no wake_base) */
        unsigned int *type_buf;         /* per-register trigger-type bits staged by set_type */
        unsigned int *type_buf_def;     /* all type bits used in each type register */
        unsigned int **virt_buf;        /* [virt_reg][reg] cache for chip-specific virtual regs */

        unsigned int irq_reg_stride;    /* register index stride between IRQ registers */
        unsigned int type_reg_stride;   /* register index stride between type registers */

        bool clear_status:1;            /* status regs need a read-to-clear pass on sync_unlock */
};
48
49 static int sub_irq_reg(struct regmap_irq_chip_data *data,
50                        unsigned int base_reg, int i)
51 {
52         const struct regmap_irq_chip *chip = data->chip;
53         struct regmap *map = data->map;
54         struct regmap_irq_sub_irq_map *subreg;
55         unsigned int offset;
56         int reg = 0;
57
58         if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
59                 /* Assume linear mapping */
60                 reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
61         } else {
62                 subreg = &chip->sub_reg_offsets[i];
63                 offset = subreg->offset[0];
64                 reg = base_reg + offset;
65         }
66
67         return reg;
68 }
69
70 static inline const
71 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
72                                      int irq)
73 {
74         return &data->chip->irqs[irq];
75 }
76
77 static void regmap_irq_lock(struct irq_data *data)
78 {
79         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
80
81         mutex_lock(&d->lock);
82 }
83
84 static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
85                                   unsigned int reg, unsigned int mask,
86                                   unsigned int val)
87 {
88         if (d->chip->mask_writeonly)
89                 return regmap_write_bits(d->map, reg, mask, val);
90         else
91                 return regmap_update_bits(d->map, reg, mask, val);
92 }
93
/*
 * irq_bus_sync_unlock callback: flush all cached mask, wake, type and
 * virtual-register state out to the hardware, ack masked interrupts
 * where required, propagate any wake-count change to the parent
 * interrupt and finally release the lock taken in regmap_irq_lock().
 * Relies on the regmap cache to suppress writes that change nothing.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, j, ret;
        u32 reg;
        u32 unmask_offset;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                /*
                 * clear_status is set on unmask for clear_on_unmask chips;
                 * the read itself clears the latched status, so the value
                 * is deliberately discarded.
                 */
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = sub_irq_reg(d, d->chip->status_base, i);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (!d->chip->mask_base)
                        continue;

                reg = sub_irq_reg(d, d->chip->mask_base, i);
                if (d->chip->mask_invert) {
                        /* Inverted sense: a set bit enables the interrupt */
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], ~d->mask_buf[i]);
                } else if (d->chip->unmask_base) {
                        /* set mask with mask_base register */
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret < 0)
                                dev_err(d->map->dev,
                                        "Failed to sync unmasks in %x\n",
                                        reg);
                        unmask_offset = d->chip->unmask_base -
                                                        d->chip->mask_base;
                        /* clear mask with unmask_base register */
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf_def[i],
                                        d->mask_buf[i]);
                } else {
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], d->mask_buf[i]);
                }
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);

                /*
                 * NOTE(review): reg is computed from wake_base even when
                 * wake_buf is NULL (no wake_base); harmless since it is
                 * only used inside the if below.
                 */
                reg = sub_irq_reg(d, d->chip->wake_base, i);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;
                /*
                 * Ack all the masked interrupts unconditionally,
                 * OR if there is masked interrupt which hasn't been Acked,
                 * it'll be ignored in irq handler, then may introduce irq storm
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = sub_irq_reg(d, d->chip->ack_base, i);

                        /* some chips ack by write 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                /*
                                 * clear_ack chips need a second write with
                                 * the opposite polarity to clear the ack
                                 * register again after acking.
                                 */
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                           d->mask_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                           ~d->mask_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = sub_irq_reg(d, d->chip->type_base, i);
                        if (d->chip->type_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], ~d->type_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        /* Write back any chip-specific virtual register state */
        if (d->chip->num_virt_regs) {
                for (i = 0; i < d->chip->num_virt_regs; i++) {
                        for (j = 0; j < d->chip->num_regs; j++) {
                                reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
                                                  j);
                                ret = regmap_write(map, reg, d->virt_buf[i][j]);
                                if (ret != 0)
                                        dev_err(d->map->dev,
                                                "Failed to write virt 0x%x: %d\n",
                                                reg, ret);
                        }
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}
251
252 static void regmap_irq_enable(struct irq_data *data)
253 {
254         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
255         struct regmap *map = d->map;
256         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
257         unsigned int mask, type;
258
259         type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
260
261         /*
262          * The type_in_mask flag means that the underlying hardware uses
263          * separate mask bits for rising and falling edge interrupts, but
264          * we want to make them into a single virtual interrupt with
265          * configurable edge.
266          *
267          * If the interrupt we're enabling defines the falling or rising
268          * masks then instead of using the regular mask bits for this
269          * interrupt, use the value previously written to the type buffer
270          * at the corresponding offset in regmap_irq_set_type().
271          */
272         if (d->chip->type_in_mask && type)
273                 mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
274         else
275                 mask = irq_data->mask;
276
277         if (d->chip->clear_on_unmask)
278                 d->clear_status = true;
279
280         d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
281 }
282
283 static void regmap_irq_disable(struct irq_data *data)
284 {
285         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
286         struct regmap *map = d->map;
287         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
288
289         d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
290 }
291
292 static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
293 {
294         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
295         struct regmap *map = d->map;
296         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
297         int reg;
298         const struct regmap_irq_type *t = &irq_data->type;
299
300         if ((t->types_supported & type) != type)
301                 return 0;
302
303         reg = t->type_reg_offset / map->reg_stride;
304
305         if (t->type_reg_mask)
306                 d->type_buf[reg] &= ~t->type_reg_mask;
307         else
308                 d->type_buf[reg] &= ~(t->type_falling_val |
309                                       t->type_rising_val |
310                                       t->type_level_low_val |
311                                       t->type_level_high_val);
312         switch (type) {
313         case IRQ_TYPE_EDGE_FALLING:
314                 d->type_buf[reg] |= t->type_falling_val;
315                 break;
316
317         case IRQ_TYPE_EDGE_RISING:
318                 d->type_buf[reg] |= t->type_rising_val;
319                 break;
320
321         case IRQ_TYPE_EDGE_BOTH:
322                 d->type_buf[reg] |= (t->type_falling_val |
323                                         t->type_rising_val);
324                 break;
325
326         case IRQ_TYPE_LEVEL_HIGH:
327                 d->type_buf[reg] |= t->type_level_high_val;
328                 break;
329
330         case IRQ_TYPE_LEVEL_LOW:
331                 d->type_buf[reg] |= t->type_level_low_val;
332                 break;
333         default:
334                 return -EINVAL;
335         }
336
337         if (d->chip->set_type_virt)
338                 return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
339                                               reg);
340
341         return 0;
342 }
343
344 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
345 {
346         struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
347         struct regmap *map = d->map;
348         const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
349
350         if (on) {
351                 if (d->wake_buf)
352                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
353                                 &= ~irq_data->mask;
354                 d->wake_count++;
355         } else {
356                 if (d->wake_buf)
357                         d->wake_buf[irq_data->reg_offset / map->reg_stride]
358                                 |= irq_data->mask;
359                 d->wake_count--;
360         }
361
362         return 0;
363 }
364
/*
 * Template irq_chip copied into each regmap_irq_chip_data instance at
 * registration time; the .name field is filled in per-instance.
 */
static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};
373
/*
 * Read the status register(s) behind bit @b of the main status
 * register into data->status_buf.  Returns 0 or the first regmap_read
 * error encountered.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                           unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        struct regmap_irq_sub_irq_map *subreg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                /* Assume linear mapping */
                ret = regmap_read(map, chip->status_base +
                                  (b * map->reg_stride * data->irq_reg_stride),
                                   &data->status_buf[b]);
        } else {
                /* One main-status bit may cover several sub registers */
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];

                        /*
                         * NOTE(review): not_fixed_stride chips index the
                         * status buffer by main-status bit [b] (each bit
                         * maps to exactly one register), while fixed-stride
                         * chips index by register [offset].  Looks
                         * intentional given the num_regs == 1 constraint
                         * enforced at registration — confirm against
                         * regmap_add_irq_chip_fwnode().
                         */
                        if (chip->not_fixed_stride)
                                ret = regmap_read(map,
                                                chip->status_base + offset,
                                                &data->status_buf[b]);
                        else
                                ret = regmap_read(map,
                                                chip->status_base + offset,
                                                &data->status_buf[offset]);

                        /* Stop at the first failed read */
                        if (ret)
                                break;
                }
        }
        return ret;
}
407
/*
 * Threaded handler for the chip's primary interrupt: gather status
 * (narrowed via a main status register, or accelerated with one bulk
 * read when the layout allows), drop masked bits, ack as configured,
 * then dispatch the nested handler of every asserted source.
 * Returns IRQ_HANDLED if at least one nested handler was invoked.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * Read only registers with active IRQs if the chip has 'main status
         * register'. Else read in the statuses, using a single bulk read if
         * possible in order to reduce the I/O overheads.
         */

        if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                 chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);

                /* We could support bulk read for main status registers
                 * but I don't expect to see devices with really many main
                 * status registers so let's only support single reads for the
                 * sake of simplicity. and add bulk reads only if needed
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        ret = regmap_read(map, chip->main_status +
                                  (i * map->reg_stride
                                   * data->irq_reg_stride),
                                  &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        /* Each set bit selects one sub-status block to read */
                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }

                }
        } else if (!map->use_single_read && map->reg_stride == 1 &&
                   data->irq_reg_stride == 1) {

                /*
                 * Contiguous registers: one bulk read into the raw-width
                 * buffer, then widen each value into status_buf.
                 */
                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }

        } else {
                /* Fall back to one read per status register */
                for (i = 0; i < data->chip->num_regs; i++) {
                        unsigned int reg = sub_irq_reg(data,
                                        data->chip->status_base, i);
                        ret = regmap_read(map, reg, &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        /* Some chips report status active-low; normalise to active-high */
        if (chip->status_invert)
                for (i = 0; i < data->chip->num_regs; i++)
                        data->status_buf[i] = ~data->status_buf[i];

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = sub_irq_reg(data, data->chip->ack_base, i);

                        /* some chips ack by writing 0 (ack_invert) */
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                data->status_buf[i]);
                        if (chip->clear_ack) {
                                /*
                                 * clear_ack chips need a second write of the
                                 * opposite polarity to clear the ack register
                                 * again after acking.
                                 */
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                        data->status_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                        ~data->status_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Dispatch the nested handler for every asserted source */
        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}
591
592 static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
593                           irq_hw_number_t hw)
594 {
595         struct regmap_irq_chip_data *data = h->host_data;
596
597         irq_set_chip_data(virq, data);
598         irq_set_chip(virq, &data->irq_chip);
599         irq_set_nested_thread(virq, 1);
600         irq_set_parent(virq, data->irq);
601         irq_set_noprobe(virq);
602
603         return 0;
604 }
605
/* Domain ops: map via regmap_irq_map, accept one- or two-cell specs. */
static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};
610
611 /**
612  * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
613  *
614  * @fwnode: The firmware node where the IRQ domain should be added to.
615  * @map: The regmap for the device.
616  * @irq: The IRQ the device uses to signal interrupts.
617  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
618  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
619  * @chip: Configuration for the interrupt controller.
620  * @data: Runtime data structure for the controller, allocated on success.
621  *
622  * Returns 0 on success or an errno on failure.
623  *
624  * In order for this to be efficient the chip really should use a
625  * register cache.  The chip driver is responsible for restoring the
626  * register values used by the IRQ controller over suspend and resume.
627  */
628 int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
629                                struct regmap *map, int irq,
630                                int irq_flags, int irq_base,
631                                const struct regmap_irq_chip *chip,
632                                struct regmap_irq_chip_data **data)
633 {
634         struct regmap_irq_chip_data *d;
635         int i;
636         int ret = -ENOMEM;
637         int num_type_reg;
638         u32 reg;
639         u32 unmask_offset;
640
641         if (chip->num_regs <= 0)
642                 return -EINVAL;
643
644         if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
645                 return -EINVAL;
646
647         for (i = 0; i < chip->num_irqs; i++) {
648                 if (chip->irqs[i].reg_offset % map->reg_stride)
649                         return -EINVAL;
650                 if (chip->irqs[i].reg_offset / map->reg_stride >=
651                     chip->num_regs)
652                         return -EINVAL;
653         }
654
655         if (chip->not_fixed_stride) {
656                 for (i = 0; i < chip->num_regs; i++)
657                         if (chip->sub_reg_offsets[i].num_regs != 1)
658                                 return -EINVAL;
659         }
660
661         if (irq_base) {
662                 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
663                 if (irq_base < 0) {
664                         dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
665                                  irq_base);
666                         return irq_base;
667                 }
668         }
669
670         d = kzalloc(sizeof(*d), GFP_KERNEL);
671         if (!d)
672                 return -ENOMEM;
673
674         if (chip->num_main_regs) {
675                 d->main_status_buf = kcalloc(chip->num_main_regs,
676                                              sizeof(unsigned int),
677                                              GFP_KERNEL);
678
679                 if (!d->main_status_buf)
680                         goto err_alloc;
681         }
682
683         d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
684                                 GFP_KERNEL);
685         if (!d->status_buf)
686                 goto err_alloc;
687
688         d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
689                               GFP_KERNEL);
690         if (!d->mask_buf)
691                 goto err_alloc;
692
693         d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
694                                   GFP_KERNEL);
695         if (!d->mask_buf_def)
696                 goto err_alloc;
697
698         if (chip->wake_base) {
699                 d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
700                                       GFP_KERNEL);
701                 if (!d->wake_buf)
702                         goto err_alloc;
703         }
704
705         num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
706         if (num_type_reg) {
707                 d->type_buf_def = kcalloc(num_type_reg,
708                                           sizeof(unsigned int), GFP_KERNEL);
709                 if (!d->type_buf_def)
710                         goto err_alloc;
711
712                 d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
713                                       GFP_KERNEL);
714                 if (!d->type_buf)
715                         goto err_alloc;
716         }
717
718         if (chip->num_virt_regs) {
719                 /*
720                  * Create virt_buf[chip->num_extra_config_regs][chip->num_regs]
721                  */
722                 d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
723                                       GFP_KERNEL);
724                 if (!d->virt_buf)
725                         goto err_alloc;
726
727                 for (i = 0; i < chip->num_virt_regs; i++) {
728                         d->virt_buf[i] = kcalloc(chip->num_regs,
729                                                  sizeof(unsigned int),
730                                                  GFP_KERNEL);
731                         if (!d->virt_buf[i])
732                                 goto err_alloc;
733                 }
734         }
735
736         d->irq_chip = regmap_irq_chip;
737         d->irq_chip.name = chip->name;
738         d->irq = irq;
739         d->map = map;
740         d->chip = chip;
741         d->irq_base = irq_base;
742
743         if (chip->irq_reg_stride)
744                 d->irq_reg_stride = chip->irq_reg_stride;
745         else
746                 d->irq_reg_stride = 1;
747
748         if (chip->type_reg_stride)
749                 d->type_reg_stride = chip->type_reg_stride;
750         else
751                 d->type_reg_stride = 1;
752
753         if (!map->use_single_read && map->reg_stride == 1 &&
754             d->irq_reg_stride == 1) {
755                 d->status_reg_buf = kmalloc_array(chip->num_regs,
756                                                   map->format.val_bytes,
757                                                   GFP_KERNEL);
758                 if (!d->status_reg_buf)
759                         goto err_alloc;
760         }
761
762         mutex_init(&d->lock);
763
764         for (i = 0; i < chip->num_irqs; i++)
765                 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
766                         |= chip->irqs[i].mask;
767
768         /* Mask all the interrupts by default */
769         for (i = 0; i < chip->num_regs; i++) {
770                 d->mask_buf[i] = d->mask_buf_def[i];
771                 if (!chip->mask_base)
772                         continue;
773
774                 reg = sub_irq_reg(d, d->chip->mask_base, i);
775
776                 if (chip->mask_invert)
777                         ret = regmap_irq_update_bits(d, reg,
778                                          d->mask_buf[i], ~d->mask_buf[i]);
779                 else if (d->chip->unmask_base) {
780                         unmask_offset = d->chip->unmask_base -
781                                         d->chip->mask_base;
782                         ret = regmap_irq_update_bits(d,
783                                         reg + unmask_offset,
784                                         d->mask_buf[i],
785                                         d->mask_buf[i]);
786                 } else
787                         ret = regmap_irq_update_bits(d, reg,
788                                          d->mask_buf[i], d->mask_buf[i]);
789                 if (ret != 0) {
790                         dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
791                                 reg, ret);
792                         goto err_alloc;
793                 }
794
795                 if (!chip->init_ack_masked)
796                         continue;
797
798                 /* Ack masked but set interrupts */
799                 reg = sub_irq_reg(d, d->chip->status_base, i);
800                 ret = regmap_read(map, reg, &d->status_buf[i]);
801                 if (ret != 0) {
802                         dev_err(map->dev, "Failed to read IRQ status: %d\n",
803                                 ret);
804                         goto err_alloc;
805                 }
806
807                 if (chip->status_invert)
808                         d->status_buf[i] = ~d->status_buf[i];
809
810                 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
811                         reg = sub_irq_reg(d, d->chip->ack_base, i);
812                         if (chip->ack_invert)
813                                 ret = regmap_write(map, reg,
814                                         ~(d->status_buf[i] & d->mask_buf[i]));
815                         else
816                                 ret = regmap_write(map, reg,
817                                         d->status_buf[i] & d->mask_buf[i]);
818                         if (chip->clear_ack) {
819                                 if (chip->ack_invert && !ret)
820                                         ret = regmap_write(map, reg,
821                                                 (d->status_buf[i] &
822                                                  d->mask_buf[i]));
823                                 else if (!ret)
824                                         ret = regmap_write(map, reg,
825                                                 ~(d->status_buf[i] &
826                                                   d->mask_buf[i]));
827                         }
828                         if (ret != 0) {
829                                 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
830                                         reg, ret);
831                                 goto err_alloc;
832                         }
833                 }
834         }
835
836         /* Wake is disabled by default */
837         if (d->wake_buf) {
838                 for (i = 0; i < chip->num_regs; i++) {
839                         d->wake_buf[i] = d->mask_buf_def[i];
840                         reg = sub_irq_reg(d, d->chip->wake_base, i);
841
842                         if (chip->wake_invert)
843                                 ret = regmap_irq_update_bits(d, reg,
844                                                          d->mask_buf_def[i],
845                                                          0);
846                         else
847                                 ret = regmap_irq_update_bits(d, reg,
848                                                          d->mask_buf_def[i],
849                                                          d->wake_buf[i]);
850                         if (ret != 0) {
851                                 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
852                                         reg, ret);
853                                 goto err_alloc;
854                         }
855                 }
856         }
857
858         if (chip->num_type_reg && !chip->type_in_mask) {
859                 for (i = 0; i < chip->num_type_reg; ++i) {
860                         reg = sub_irq_reg(d, d->chip->type_base, i);
861
862                         ret = regmap_read(map, reg, &d->type_buf_def[i]);
863
864                         if (d->chip->type_invert)
865                                 d->type_buf_def[i] = ~d->type_buf_def[i];
866
867                         if (ret) {
868                                 dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
869                                         reg, ret);
870                                 goto err_alloc;
871                         }
872                 }
873         }
874
875         if (irq_base)
876                 d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
877                                                      irq_base, 0,
878                                                      &regmap_domain_ops, d);
879         else
880                 d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
881                                                      &regmap_domain_ops, d);
882         if (!d->domain) {
883                 dev_err(map->dev, "Failed to create IRQ domain\n");
884                 ret = -ENOMEM;
885                 goto err_alloc;
886         }
887
888         ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
889                                    irq_flags | IRQF_ONESHOT,
890                                    chip->name, d);
891         if (ret != 0) {
892                 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
893                         irq, chip->name, ret);
894                 goto err_domain;
895         }
896
897         *data = d;
898
899         return 0;
900
901 err_domain:
902         /* Should really dispose of the domain but... */
903 err_alloc:
904         kfree(d->type_buf);
905         kfree(d->type_buf_def);
906         kfree(d->wake_buf);
907         kfree(d->mask_buf_def);
908         kfree(d->mask_buf);
909         kfree(d->status_buf);
910         kfree(d->status_reg_buf);
911         if (d->virt_buf) {
912                 for (i = 0; i < chip->num_virt_regs; i++)
913                         kfree(d->virt_buf[i]);
914                 kfree(d->virt_buf);
915         }
916         kfree(d);
917         return ret;
918 }
919 EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
920
921 /**
922  * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
923  *
924  * @map: The regmap for the device.
925  * @irq: The IRQ the device uses to signal interrupts.
926  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
927  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
928  * @chip: Configuration for the interrupt controller.
929  * @data: Runtime data structure for the controller, allocated on success.
930  *
931  * Returns 0 on success or an errno on failure.
932  *
933  * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
934  * node of the regmap is used.
935  */
936 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
937                         int irq_base, const struct regmap_irq_chip *chip,
938                         struct regmap_irq_chip_data **data)
939 {
940         return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
941                                           irq_flags, irq_base, chip, data);
942 }
943 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
944
945 /**
946  * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
947  *
948  * @irq: Primary IRQ for the device
949  * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
950  *
951  * This function also disposes of all mapped IRQs on the chip.
952  */
953 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
954 {
955         unsigned int virq;
956         int hwirq;
957
958         if (!d)
959                 return;
960
961         free_irq(irq, d);
962
963         /* Dispose all virtual irq from irq domain before removing it */
964         for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
965                 /* Ignore hwirq if holes in the IRQ list */
966                 if (!d->chip->irqs[hwirq].mask)
967                         continue;
968
969                 /*
970                  * Find the virtual irq of hwirq on chip and if it is
971                  * there then dispose it
972                  */
973                 virq = irq_find_mapping(d->domain, hwirq);
974                 if (virq)
975                         irq_dispose_mapping(virq);
976         }
977
978         irq_domain_remove(d->domain);
979         kfree(d->type_buf);
980         kfree(d->type_buf_def);
981         kfree(d->wake_buf);
982         kfree(d->mask_buf_def);
983         kfree(d->mask_buf);
984         kfree(d->status_reg_buf);
985         kfree(d->status_buf);
986         kfree(d);
987 }
988 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
989
990 static void devm_regmap_irq_chip_release(struct device *dev, void *res)
991 {
992         struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
993
994         regmap_del_irq_chip(d->irq, d);
995 }
996
/*
 * devres match callback: true when the devres entry holds the
 * regmap_irq_chip_data pointer passed in @data.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* A NULL entry indicates devres corruption; warn and don't match */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
1008
1009 /**
1010  * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
1011  *
1012  * @dev: The device pointer on which irq_chip belongs to.
1013  * @fwnode: The firmware node where the IRQ domain should be added to.
1014  * @map: The regmap for the device.
1015  * @irq: The IRQ the device uses to signal interrupts
1016  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1017  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1018  * @chip: Configuration for the interrupt controller.
1019  * @data: Runtime data structure for the controller, allocated on success
1020  *
1021  * Returns 0 on success or an errno on failure.
1022  *
1023  * The &regmap_irq_chip_data will be automatically released when the device is
1024  * unbound.
1025  */
1026 int devm_regmap_add_irq_chip_fwnode(struct device *dev,
1027                                     struct fwnode_handle *fwnode,
1028                                     struct regmap *map, int irq,
1029                                     int irq_flags, int irq_base,
1030                                     const struct regmap_irq_chip *chip,
1031                                     struct regmap_irq_chip_data **data)
1032 {
1033         struct regmap_irq_chip_data **ptr, *d;
1034         int ret;
1035
1036         ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
1037                            GFP_KERNEL);
1038         if (!ptr)
1039                 return -ENOMEM;
1040
1041         ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
1042                                          chip, &d);
1043         if (ret < 0) {
1044                 devres_free(ptr);
1045                 return ret;
1046         }
1047
1048         *ptr = d;
1049         devres_add(dev, ptr);
1050         *data = d;
1051         return 0;
1052 }
1053 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
1054
1055 /**
1056  * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
1057  *
1058  * @dev: The device pointer on which irq_chip belongs to.
1059  * @map: The regmap for the device.
1060  * @irq: The IRQ the device uses to signal interrupts
1061  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1062  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1063  * @chip: Configuration for the interrupt controller.
1064  * @data: Runtime data structure for the controller, allocated on success
1065  *
1066  * Returns 0 on success or an errno on failure.
1067  *
1068  * The &regmap_irq_chip_data will be automatically released when the device is
1069  * unbound.
1070  */
1071 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
1072                              int irq_flags, int irq_base,
1073                              const struct regmap_irq_chip *chip,
1074                              struct regmap_irq_chip_data **data)
1075 {
1076         return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
1077                                                irq, irq_flags, irq_base, chip,
1078                                                data);
1079 }
1080 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
1081
1082 /**
1083  * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
1084  *
1085  * @dev: Device for which which resource was allocated.
1086  * @irq: Primary IRQ for the device.
1087  * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1088  *
1089  * A resource managed version of regmap_del_irq_chip().
1090  */
1091 void devm_regmap_del_irq_chip(struct device *dev, int irq,
1092                               struct regmap_irq_chip_data *data)
1093 {
1094         int rc;
1095
1096         WARN_ON(irq != data->irq);
1097         rc = devres_release(dev, devm_regmap_irq_chip_release,
1098                             devm_regmap_irq_chip_match, data);
1099
1100         if (rc != 0)
1101                 WARN_ON(rc);
1102 }
1103 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1104
1105 /**
1106  * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
1107  *
1108  * @data: regmap irq controller to operate on.
1109  *
1110  * Useful for drivers to request their own IRQs.
1111  */
1112 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
1113 {
1114         WARN_ON(!data->irq_base);
1115         return data->irq_base;
1116 }
1117 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1118
1119 /**
1120  * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1121  *
1122  * @data: regmap irq controller to operate on.
1123  * @irq: index of the interrupt requested in the chip IRQs.
1124  *
1125  * Useful for drivers to request their own IRQs.
1126  */
1127 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
1128 {
1129         /* Handle holes in the IRQ list */
1130         if (!data->chip->irqs[irq].mask)
1131                 return -EINVAL;
1132
1133         return irq_create_mapping(data->domain, irq);
1134 }
1135 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1136
1137 /**
1138  * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
1139  *
1140  * @data: regmap_irq controller to operate on.
1141  *
1142  * Useful for drivers to request their own IRQs and for integration
1143  * with subsystems.  For ease of integration NULL is accepted as a
1144  * domain, allowing devices to just call this even if no domain is
1145  * allocated.
1146  */
1147 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1148 {
1149         if (data)
1150                 return data->domain;
1151         else
1152                 return NULL;
1153 }
1154 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);