// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf)                              (0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)                          (MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)                      (MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)                       (MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf)                         (0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)                     (MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)                 (MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)                  (MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4)                                (0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)                        (MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)                     (MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)                    (MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf)                     (0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)                 (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)             (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)              (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)                (0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)            (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)        (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)         (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

/**
 * struct dpu_intr_reg - offsets for one set of DPU interrupt registers
 * @clr_off:    offset to CLEAR reg
 * @en_off:     offset to ENABLE reg
 * @status_off: offset to STATUS reg
 */
struct dpu_intr_reg {
        u32 clr_off;
        u32 en_off;
        u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
        [MDP_SSPP_TOP0_INTR] = {
                INTR_CLEAR,
                INTR_EN,
                INTR_STATUS
        },
        [MDP_SSPP_TOP0_INTR2] = {
                INTR2_CLEAR,
                INTR2_EN,
                INTR2_STATUS
        },
        [MDP_SSPP_TOP0_HIST_INTR] = {
                HIST_INTR_CLEAR,
                HIST_INTR_EN,
                HIST_INTR_STATUS
        },
        [MDP_INTF0_INTR] = {
                MDP_INTF_INTR_CLEAR(0),
                MDP_INTF_INTR_EN(0),
                MDP_INTF_INTR_STATUS(0)
        },
        [MDP_INTF1_INTR] = {
                MDP_INTF_INTR_CLEAR(1),
                MDP_INTF_INTR_EN(1),
                MDP_INTF_INTR_STATUS(1)
        },
        [MDP_INTF2_INTR] = {
                MDP_INTF_INTR_CLEAR(2),
                MDP_INTF_INTR_EN(2),
                MDP_INTF_INTR_STATUS(2)
        },
        [MDP_INTF3_INTR] = {
                MDP_INTF_INTR_CLEAR(3),
                MDP_INTF_INTR_EN(3),
                MDP_INTF_INTR_STATUS(3)
        },
        [MDP_INTF4_INTR] = {
                MDP_INTF_INTR_CLEAR(4),
                MDP_INTF_INTR_EN(4),
                MDP_INTF_INTR_STATUS(4)
        },
        [MDP_INTF5_INTR] = {
                MDP_INTF_INTR_CLEAR(5),
                MDP_INTF_INTR_EN(5),
                MDP_INTF_INTR_STATUS(5)
        },
        [MDP_INTF1_TEAR_INTR] = {
                MDP_INTF_INTR_TEAR_CLEAR(1),
                MDP_INTF_INTR_TEAR_EN(1),
                MDP_INTF_INTR_TEAR_STATUS(1)
        },
        [MDP_INTF2_TEAR_INTR] = {
                MDP_INTF_INTR_TEAR_CLEAR(2),
                MDP_INTF_INTR_TEAR_EN(2),
                MDP_INTF_INTR_TEAR_STATUS(2)
        },
        [MDP_AD4_0_INTR] = {
                MDP_AD4_INTR_CLEAR_OFF(0),
                MDP_AD4_INTR_EN_OFF(0),
                MDP_AD4_INTR_STATUS_OFF(0),
        },
        [MDP_AD4_1_INTR] = {
                MDP_AD4_INTR_CLEAR_OFF(1),
                MDP_AD4_INTR_EN_OFF(1),
                MDP_AD4_INTR_STATUS_OFF(1),
        },
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
        [MDP_SSPP_TOP0_INTR] = {
                INTR_CLEAR,
                INTR_EN,
                INTR_STATUS
        },
        [MDP_SSPP_TOP0_INTR2] = {
                INTR2_CLEAR,
                INTR2_EN,
                INTR2_STATUS
        },
        [MDP_SSPP_TOP0_HIST_INTR] = {
                HIST_INTR_CLEAR,
                HIST_INTR_EN,
                HIST_INTR_STATUS
        },
        [MDP_INTF0_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(0),
                MDP_INTF_REV_7xxx_INTR_EN(0),
                MDP_INTF_REV_7xxx_INTR_STATUS(0)
        },
        [MDP_INTF1_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(1),
                MDP_INTF_REV_7xxx_INTR_EN(1),
                MDP_INTF_REV_7xxx_INTR_STATUS(1)
        },
        [MDP_INTF1_TEAR_INTR] = {
                MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
                MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
                MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
        },
        [MDP_INTF2_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(2),
                MDP_INTF_REV_7xxx_INTR_EN(2),
                MDP_INTF_REV_7xxx_INTR_STATUS(2)
        },
        [MDP_INTF2_TEAR_INTR] = {
                MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
                MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
                MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
        },
        [MDP_INTF3_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(3),
                MDP_INTF_REV_7xxx_INTR_EN(3),
                MDP_INTF_REV_7xxx_INTR_STATUS(3)
        },
        [MDP_INTF4_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(4),
                MDP_INTF_REV_7xxx_INTR_EN(4),
                MDP_INTF_REV_7xxx_INTR_STATUS(4)
        },
        [MDP_INTF5_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(5),
                MDP_INTF_REV_7xxx_INTR_EN(5),
                MDP_INTF_REV_7xxx_INTR_STATUS(5)
        },
        [MDP_INTF6_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(6),
                MDP_INTF_REV_7xxx_INTR_EN(6),
                MDP_INTF_REV_7xxx_INTR_STATUS(6)
        },
        [MDP_INTF7_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(7),
                MDP_INTF_REV_7xxx_INTR_EN(7),
                MDP_INTF_REV_7xxx_INTR_STATUS(7)
        },
        [MDP_INTF8_INTR] = {
                MDP_INTF_REV_7xxx_INTR_CLEAR(8),
                MDP_INTF_REV_7xxx_INTR_EN(8),
                MDP_INTF_REV_7xxx_INTR_STATUS(8)
        },
};

#define DPU_IRQ_MASK(irq_idx)   (BIT(DPU_IRQ_BIT(irq_idx)))

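/*
 * Core IRQ indices are 1-based: irq_idx == 0 means "no interrupt", so
 * lookups into the flat irq_tbl[] below subtract one from the index.
 */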
static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
{
        return irq_idx && irq_idx <= DPU_NUM_IRQS;
}

static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
                                                               unsigned int irq_idx)
{
        return &intr->irq_tbl[irq_idx - 1];
}

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:            Pointer to DPU's KMS structure
 * @irq_idx:            interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
        struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

        VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        if (!irq_entry->cb) {
                DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return;
        }

        atomic_inc(&irq_entry->count);

        /*
         * Perform the registered callback
         */
        irq_entry->cb(irq_entry->arg);
}

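/**
 * dpu_core_irq - top-level core interrupt handler
 * @kms: MSM KMS handle
 *
 * For each enabled interrupt register set, read and clear the pending
 * status, then dispatch the registered callback for every asserted bit.
 */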
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
        struct dpu_hw_intr *intr = dpu_kms->hw_intr;
        int reg_idx;
        unsigned int irq_idx;
        u32 irq_status;
        u32 enable_mask;
        int bit;
        unsigned long irq_flags;

        if (!intr)
                return IRQ_NONE;

        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
                if (!test_bit(reg_idx, &intr->irq_mask))
                        continue;

                /* Read interrupt status */
                irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

                /* Read enable mask */
                enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

                /* and clear the interrupt */
                if (irq_status)
                        DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
                                     irq_status);

                /* Finally update IRQ status based on enable mask */
                irq_status &= enable_mask;

                if (!irq_status)
                        continue;

                /*
                 * Search through matching intr status.
                 */
                while ((bit = ffs(irq_status)) != 0) {
                        irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

                        dpu_core_irq_callback_handler(dpu_kms, irq_idx);

                        /*
                         * When the callback finishes, clear irq_status
                         * for the matching bit. Once irq_status is fully
                         * cleared, the search can stop.
                         */
                        irq_status &= ~BIT(bit - 1);
                }
        }

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

        return IRQ_HANDLED;
}

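/**
 * dpu_hw_intr_enable_irq_locked - enable the given core interrupt
 * @intr: DPU interrupt handle
 * @irq_idx: interrupt index
 *
 * Caller must hold intr->irq_lock.
 */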
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
                                         unsigned int irq_idx)
{
        int reg_idx;
        const struct dpu_intr_reg *reg;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (!dpu_core_irq_is_valid(irq_idx)) {
                pr_err("invalid IRQ=[%d, %d]\n",
                       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EINVAL;
        }

        /*
         * The cache_irq_mask and hardware RMW operations need to be done
         * under irq_lock; it is the caller's responsibility to ensure the
         * lock is held.
         */
        assert_spin_locked(&intr->irq_lock);

        reg_idx = DPU_IRQ_REG(irq_idx);
        reg = &intr->intr_set[reg_idx];

        /* Is this interrupt register supported on the platform? */
        if (WARN_ON(!reg->en_off))
                return -EINVAL;

        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
                dbgstr = "already ";
        } else {
                dbgstr = "";

                cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
                /* Clear any pending interrupt */
                DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
                /* Enable interrupts with the new mask */
                DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
                 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
                 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

        return 0;
}

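/**
 * dpu_hw_intr_disable_irq_locked - disable the given core interrupt
 * @intr: DPU interrupt handle
 * @irq_idx: interrupt index
 *
 * Caller must hold intr->irq_lock.
 */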
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
                                          unsigned int irq_idx)
{
        int reg_idx;
        const struct dpu_intr_reg *reg;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (!dpu_core_irq_is_valid(irq_idx)) {
                pr_err("invalid IRQ=[%d, %d]\n",
                       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EINVAL;
        }

        /*
         * The cache_irq_mask and hardware RMW operations need to be done
         * under irq_lock; it is the caller's responsibility to ensure the
         * lock is held.
         */
        assert_spin_locked(&intr->irq_lock);

        reg_idx = DPU_IRQ_REG(irq_idx);
        reg = &intr->intr_set[reg_idx];

        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
                dbgstr = "already ";
        } else {
                dbgstr = "";

                cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
                /* Disable interrupts based on the new mask */
                DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
                /* Clear any pending interrupt */
                DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
                 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
                 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

        return 0;
}

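/* Clear any pending interrupts in all supported interrupt register sets. */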
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
        struct dpu_hw_intr *intr = dpu_kms->hw_intr;
        int i;

        if (!intr)
                return;

        for (i = 0; i < MDP_INTR_MAX; i++) {
                if (test_bit(i, &intr->irq_mask))
                        DPU_REG_WRITE(&intr->hw,
                                        intr->intr_set[i].clr_off, 0xffffffff);
        }

        /* ensure register writes go through */
        wmb();
}

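/* Mask off all interrupts in all supported interrupt register sets. */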
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
        struct dpu_hw_intr *intr = dpu_kms->hw_intr;
        int i;

        if (!intr)
                return;

        for (i = 0; i < MDP_INTR_MAX; i++) {
                if (test_bit(i, &intr->irq_mask))
                        DPU_REG_WRITE(&intr->hw,
                                        intr->intr_set[i].en_off, 0x00000000);
        }

        /* ensure register writes go through */
        wmb();
}

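/**
 * dpu_core_irq_read - read and clear the status of the given interrupt
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index
 *
 * Return: the interrupt status masked to the given interrupt bit, or 0
 * if the index is invalid or the interrupt is not pending.
 */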
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
                      unsigned int irq_idx)
{
        struct dpu_hw_intr *intr = dpu_kms->hw_intr;
        int reg_idx;
        unsigned long irq_flags;
        u32 intr_status;

        if (!intr)
                return 0;

        if (!dpu_core_irq_is_valid(irq_idx)) {
                pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return 0;
        }

        spin_lock_irqsave(&intr->irq_lock, irq_flags);

        reg_idx = DPU_IRQ_REG(irq_idx);
        intr_status = DPU_REG_READ(&intr->hw,
                        intr->intr_set[reg_idx].status_off) &
                DPU_IRQ_MASK(irq_idx);
        if (intr_status)
                DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
                                intr_status);

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

        return intr_status;
}

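/**
 * dpu_hw_intr_init - initialize the interrupts hw object
 * @dev: corresponding device, for devres management
 * @addr: mapped register I/O address of MDP
 * @m: pointer to MDSS catalog data
 */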
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
                                     void __iomem *addr,
                                     const struct dpu_mdss_cfg *m)
{
        struct dpu_hw_intr *intr;
        unsigned int i;

        if (!addr || !m)
                return ERR_PTR(-EINVAL);

        intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
        if (!intr)
                return ERR_PTR(-ENOMEM);

        if (m->mdss_ver->core_major_ver >= 7)
                intr->intr_set = dpu_intr_set_7xxx;
        else
                intr->intr_set = dpu_intr_set_legacy;

        intr->hw.blk_addr = addr + m->mdp[0].base;

        intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
                         BIT(MDP_SSPP_TOP0_INTR2) |
                         BIT(MDP_SSPP_TOP0_HIST_INTR);
        for (i = 0; i < m->intf_count; i++) {
                const struct dpu_intf_cfg *intf = &m->intf[i];

                if (intf->type == INTF_NONE)
                        continue;

                intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

                if (intf->intr_tear_rd_ptr)
                        intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
        }

        spin_lock_init(&intr->irq_lock);

        return intr;
}

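/**
 * dpu_core_irq_register_callback - register a callback for the given interrupt
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index
 * @irq_cb: function to call when the interrupt fires
 * @irq_arg: opaque argument passed to @irq_cb
 *
 * Only one callback may be registered per interrupt; the interrupt is
 * enabled as part of registration.
 */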
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
                                   unsigned int irq_idx,
                                   void (*irq_cb)(void *arg),
                                   void *irq_arg)
{
        struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int ret;

        if (!irq_cb) {
                DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EINVAL;
        }

        if (!dpu_core_irq_is_valid(irq_idx)) {
                DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
                return -EINVAL;
        }

        VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
             DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

        irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
        if (unlikely(WARN_ON(irq_entry->cb))) {
                spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

                return -EBUSY;
        }

        trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
        irq_entry->arg = irq_arg;
        irq_entry->cb = irq_cb;

        ret = dpu_hw_intr_enable_irq_locked(
                                dpu_kms->hw_intr,
                                irq_idx);
        if (ret)
                DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
        spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

        trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        return 0;
}

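/**
 * dpu_core_irq_unregister_callback - unregister the callback for the given interrupt
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index
 *
 * Disables the interrupt and clears the registered callback.
 */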
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
                                     unsigned int irq_idx)
{
        struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int ret;

        if (!dpu_core_irq_is_valid(irq_idx)) {
                DPU_ERROR("invalid IRQ=[%d, %d]\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EINVAL;
        }

        VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
             DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
        trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
        if (ret)
                DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);

        irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
        irq_entry->cb = NULL;
        irq_entry->arg = NULL;

        spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

        trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

        return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
        struct dpu_kms *dpu_kms = s->private;
        struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int i, irq_count;
        void *cb;

        for (i = 1; i <= DPU_NUM_IRQS; i++) {
                spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
                irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
                irq_count = atomic_read(&irq_entry->count);
                cb = irq_entry->cb;
                spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

                if (irq_count || cb)
                        seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
                                   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
{
        debugfs_create_file("core_irq", 0600, parent, dpu_kms,
                &dpu_debugfs_core_irq_fops);
}
#endif

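/* Quiesce the hardware and reset the per-interrupt counters before install. */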
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
        struct dpu_hw_intr_entry *irq_entry;
        int i;

        pm_runtime_get_sync(&dpu_kms->pdev->dev);
        dpu_clear_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        for (i = 1; i <= DPU_NUM_IRQS; i++) {
                irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
                atomic_set(&irq_entry->count, 0);
        }
}

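/* Warn about any still-registered callbacks, then quiesce the hardware. */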
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
        struct dpu_hw_intr_entry *irq_entry;
        int i;

        if (!dpu_kms->hw_intr)
                return;

        pm_runtime_get_sync(&dpu_kms->pdev->dev);
        for (i = 1; i <= DPU_NUM_IRQS; i++) {
                irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
                if (irq_entry->cb)
                        DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
                                  DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
        }

        dpu_clear_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
}