// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "dpu_core_irq.h"
#include "dpu_hwio.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"
/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */

/* INTF interrupt registers for DPU <= 6.x: one 0x800-sized block per INTF */
#define MDP_INTF_OFF(intf)				(0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)				(MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)			(MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)			(MDP_INTF_OFF(intf) + 0x1c8)

/* Tearcheck interrupt registers for DPU <= 6.x */
#define MDP_INTF_TEAR_OFF(intf)				(0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x008)

/* AD4 (assertive display) interrupt registers */
#define MDP_AD4_OFF(ad4)				(0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x420)

/* INTF interrupt registers for DPU >= 7.0: one 0x1000-sized block per INTF */
#define MDP_INTF_REV_7xxx_OFF(intf)			(0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)			(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)

/* Tearcheck interrupt registers for DPU >= 7.0 */
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)		(0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)
44 * struct dpu_intr_reg - array of DPU register sets
45 * @clr_off: offset to CLEAR reg
46 * @en_off: offset to ENABLE reg
47 * @status_off: offset to STATUS reg
56 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
58 static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
59 [MDP_SSPP_TOP0_INTR] = {
64 [MDP_SSPP_TOP0_INTR2] = {
69 [MDP_SSPP_TOP0_HIST_INTR] = {
75 MDP_INTF_INTR_CLEAR(0),
77 MDP_INTF_INTR_STATUS(0)
80 MDP_INTF_INTR_CLEAR(1),
82 MDP_INTF_INTR_STATUS(1)
85 MDP_INTF_INTR_CLEAR(2),
87 MDP_INTF_INTR_STATUS(2)
90 MDP_INTF_INTR_CLEAR(3),
92 MDP_INTF_INTR_STATUS(3)
95 MDP_INTF_INTR_CLEAR(4),
97 MDP_INTF_INTR_STATUS(4)
100 MDP_INTF_INTR_CLEAR(5),
102 MDP_INTF_INTR_STATUS(5)
104 [MDP_INTF1_TEAR_INTR] = {
105 MDP_INTF_INTR_TEAR_CLEAR(1),
106 MDP_INTF_INTR_TEAR_EN(1),
107 MDP_INTF_INTR_TEAR_STATUS(1)
109 [MDP_INTF2_TEAR_INTR] = {
110 MDP_INTF_INTR_TEAR_CLEAR(2),
111 MDP_INTF_INTR_TEAR_EN(2),
112 MDP_INTF_INTR_TEAR_STATUS(2)
115 MDP_AD4_INTR_CLEAR_OFF(0),
116 MDP_AD4_INTR_EN_OFF(0),
117 MDP_AD4_INTR_STATUS_OFF(0),
120 MDP_AD4_INTR_CLEAR_OFF(1),
121 MDP_AD4_INTR_EN_OFF(1),
122 MDP_AD4_INTR_STATUS_OFF(1),
127 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
129 static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
130 [MDP_SSPP_TOP0_INTR] = {
135 [MDP_SSPP_TOP0_INTR2] = {
140 [MDP_SSPP_TOP0_HIST_INTR] = {
146 MDP_INTF_REV_7xxx_INTR_CLEAR(0),
147 MDP_INTF_REV_7xxx_INTR_EN(0),
148 MDP_INTF_REV_7xxx_INTR_STATUS(0)
151 MDP_INTF_REV_7xxx_INTR_CLEAR(1),
152 MDP_INTF_REV_7xxx_INTR_EN(1),
153 MDP_INTF_REV_7xxx_INTR_STATUS(1)
155 [MDP_INTF1_TEAR_INTR] = {
156 MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
157 MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
158 MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
161 MDP_INTF_REV_7xxx_INTR_CLEAR(2),
162 MDP_INTF_REV_7xxx_INTR_EN(2),
163 MDP_INTF_REV_7xxx_INTR_STATUS(2)
165 [MDP_INTF2_TEAR_INTR] = {
166 MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
167 MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
168 MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
171 MDP_INTF_REV_7xxx_INTR_CLEAR(3),
172 MDP_INTF_REV_7xxx_INTR_EN(3),
173 MDP_INTF_REV_7xxx_INTR_STATUS(3)
176 MDP_INTF_REV_7xxx_INTR_CLEAR(4),
177 MDP_INTF_REV_7xxx_INTR_EN(4),
178 MDP_INTF_REV_7xxx_INTR_STATUS(4)
181 MDP_INTF_REV_7xxx_INTR_CLEAR(5),
182 MDP_INTF_REV_7xxx_INTR_EN(5),
183 MDP_INTF_REV_7xxx_INTR_STATUS(5)
186 MDP_INTF_REV_7xxx_INTR_CLEAR(6),
187 MDP_INTF_REV_7xxx_INTR_EN(6),
188 MDP_INTF_REV_7xxx_INTR_STATUS(6)
191 MDP_INTF_REV_7xxx_INTR_CLEAR(7),
192 MDP_INTF_REV_7xxx_INTR_EN(7),
193 MDP_INTF_REV_7xxx_INTR_STATUS(7)
196 MDP_INTF_REV_7xxx_INTR_CLEAR(8),
197 MDP_INTF_REV_7xxx_INTR_EN(8),
198 MDP_INTF_REV_7xxx_INTR_STATUS(8)
/* Bit mask for one IRQ within its 32-bit interrupt register */
#define DPU_IRQ_MASK(irq_idx)	(BIT(DPU_IRQ_BIT(irq_idx)))
204 static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
206 return irq_idx && irq_idx <= DPU_NUM_IRQS;
209 static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
210 unsigned int irq_idx)
212 return &intr->irq_tbl[irq_idx - 1];
216 * dpu_core_irq_callback_handler - dispatch core interrupts
217 * @dpu_kms: Pointer to DPU's KMS structure
218 * @irq_idx: interrupt index
220 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
222 struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
224 VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
227 DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
228 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
230 atomic_inc(&irq_entry->count);
233 * Perform registered function callback
235 irq_entry->cb(irq_entry->arg);
238 irqreturn_t dpu_core_irq(struct msm_kms *kms)
240 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
241 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
243 unsigned int irq_idx;
247 unsigned long irq_flags;
252 spin_lock_irqsave(&intr->irq_lock, irq_flags);
253 for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
254 if (!test_bit(reg_idx, &intr->irq_mask))
257 /* Read interrupt status */
258 irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);
260 /* Read enable mask */
261 enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);
263 /* and clear the interrupt */
265 DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
268 /* Finally update IRQ status based on enable mask */
269 irq_status &= enable_mask;
275 * Search through matching intr status.
277 while ((bit = ffs(irq_status)) != 0) {
278 irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
280 dpu_core_irq_callback_handler(dpu_kms, irq_idx);
283 * When callback finish, clear the irq_status
284 * with the matching mask. Once irq_status
285 * is all cleared, the search can be stopped.
287 irq_status &= ~BIT(bit - 1);
291 /* ensure register writes go through */
294 spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
299 static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
300 unsigned int irq_idx)
303 const struct dpu_intr_reg *reg;
304 const char *dbgstr = NULL;
305 uint32_t cache_irq_mask;
310 if (!dpu_core_irq_is_valid(irq_idx)) {
311 pr_err("invalid IRQ=[%d, %d]\n",
312 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
317 * The cache_irq_mask and hardware RMW operations needs to be done
318 * under irq_lock and it's the caller's responsibility to ensure that's
321 assert_spin_locked(&intr->irq_lock);
323 reg_idx = DPU_IRQ_REG(irq_idx);
324 reg = &intr->intr_set[reg_idx];
326 /* Is this interrupt register supported on the platform */
327 if (WARN_ON(!reg->en_off))
330 cache_irq_mask = intr->cache_irq_mask[reg_idx];
331 if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
336 cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
337 /* Cleaning any pending interrupt */
338 DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
339 /* Enabling interrupts with the new mask */
340 DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
342 /* ensure register write goes through */
345 intr->cache_irq_mask[reg_idx] = cache_irq_mask;
348 pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
349 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
350 DPU_IRQ_MASK(irq_idx), cache_irq_mask);
355 static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
356 unsigned int irq_idx)
359 const struct dpu_intr_reg *reg;
360 const char *dbgstr = NULL;
361 uint32_t cache_irq_mask;
366 if (!dpu_core_irq_is_valid(irq_idx)) {
367 pr_err("invalid IRQ=[%d, %d]\n",
368 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
373 * The cache_irq_mask and hardware RMW operations needs to be done
374 * under irq_lock and it's the caller's responsibility to ensure that's
377 assert_spin_locked(&intr->irq_lock);
379 reg_idx = DPU_IRQ_REG(irq_idx);
380 reg = &intr->intr_set[reg_idx];
382 cache_irq_mask = intr->cache_irq_mask[reg_idx];
383 if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
388 cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
389 /* Disable interrupts based on the new mask */
390 DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
391 /* Cleaning any pending interrupt */
392 DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
394 /* ensure register write goes through */
397 intr->cache_irq_mask[reg_idx] = cache_irq_mask;
400 pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
401 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
402 DPU_IRQ_MASK(irq_idx), cache_irq_mask);
407 static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
409 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
415 for (i = 0; i < MDP_INTR_MAX; i++) {
416 if (test_bit(i, &intr->irq_mask))
417 DPU_REG_WRITE(&intr->hw,
418 intr->intr_set[i].clr_off, 0xffffffff);
421 /* ensure register writes go through */
425 static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
427 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
433 for (i = 0; i < MDP_INTR_MAX; i++) {
434 if (test_bit(i, &intr->irq_mask))
435 DPU_REG_WRITE(&intr->hw,
436 intr->intr_set[i].en_off, 0x00000000);
439 /* ensure register writes go through */
443 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
444 unsigned int irq_idx)
446 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
448 unsigned long irq_flags;
454 if (!dpu_core_irq_is_valid(irq_idx)) {
455 pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
459 spin_lock_irqsave(&intr->irq_lock, irq_flags);
461 reg_idx = DPU_IRQ_REG(irq_idx);
462 intr_status = DPU_REG_READ(&intr->hw,
463 intr->intr_set[reg_idx].status_off) &
464 DPU_IRQ_MASK(irq_idx);
466 DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
469 /* ensure register writes go through */
472 spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
477 struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
479 const struct dpu_mdss_cfg *m)
481 struct dpu_hw_intr *intr;
485 return ERR_PTR(-EINVAL);
487 intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
489 return ERR_PTR(-ENOMEM);
491 if (m->mdss_ver->core_major_ver >= 7)
492 intr->intr_set = dpu_intr_set_7xxx;
494 intr->intr_set = dpu_intr_set_legacy;
496 intr->hw.blk_addr = addr + m->mdp[0].base;
498 intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
499 BIT(MDP_SSPP_TOP0_INTR2) |
500 BIT(MDP_SSPP_TOP0_HIST_INTR);
501 for (i = 0; i < m->intf_count; i++) {
502 const struct dpu_intf_cfg *intf = &m->intf[i];
504 if (intf->type == INTF_NONE)
507 intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));
509 if (intf->intr_tear_rd_ptr)
510 intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
513 spin_lock_init(&intr->irq_lock);
518 int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
519 unsigned int irq_idx,
520 void (*irq_cb)(void *arg),
523 struct dpu_hw_intr_entry *irq_entry;
524 unsigned long irq_flags;
528 DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
529 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
533 if (!dpu_core_irq_is_valid(irq_idx)) {
534 DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
535 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
539 VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
540 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
542 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
544 irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
545 if (unlikely(WARN_ON(irq_entry->cb))) {
546 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
551 trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
552 irq_entry->arg = irq_arg;
553 irq_entry->cb = irq_cb;
555 ret = dpu_hw_intr_enable_irq_locked(
559 DPU_ERROR("Failed/ to enable IRQ=[%d, %d]\n",
560 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
561 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
563 trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
568 int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
569 unsigned int irq_idx)
571 struct dpu_hw_intr_entry *irq_entry;
572 unsigned long irq_flags;
575 if (!dpu_core_irq_is_valid(irq_idx)) {
576 DPU_ERROR("invalid IRQ=[%d, %d]\n",
577 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
581 VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
582 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
584 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
585 trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
587 ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
589 DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
590 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);
592 irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
593 irq_entry->cb = NULL;
594 irq_entry->arg = NULL;
596 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
598 trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
#ifdef CONFIG_DEBUG_FS
/* Dump count and callback pointer for every IRQ that has either */
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		/* snapshot entry under the lock, print outside it */
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		irq_count = atomic_read(&irq_entry->count);
		cb = irq_entry->cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
				   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif
637 void dpu_core_irq_preinstall(struct msm_kms *kms)
639 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
640 struct dpu_hw_intr_entry *irq_entry;
643 pm_runtime_get_sync(&dpu_kms->pdev->dev);
644 dpu_clear_irqs(dpu_kms);
645 dpu_disable_all_irqs(dpu_kms);
646 pm_runtime_put_sync(&dpu_kms->pdev->dev);
648 for (i = 1; i <= DPU_NUM_IRQS; i++) {
649 irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
650 atomic_set(&irq_entry->count, 0);
654 void dpu_core_irq_uninstall(struct msm_kms *kms)
656 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
657 struct dpu_hw_intr_entry *irq_entry;
660 if (!dpu_kms->hw_intr)
663 pm_runtime_get_sync(&dpu_kms->pdev->dev);
664 for (i = 1; i <= DPU_NUM_IRQS; i++) {
665 irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
667 DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
668 DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
671 dpu_clear_irqs(dpu_kms);
672 dpu_disable_all_irqs(dpu_kms);
673 pm_runtime_put_sync(&dpu_kms->pdev->dev);