drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

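/* Program the MDP5 interrupt-enable register with the given mask. */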
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}

static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}

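/*
 * Clear any latched interrupts and mask everything before the irq handler
 * is installed; mdp5_enable()/mdp5_disable() bracket the register writes so
 * the block is accessible while they happen.
 */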
void mdp5_irq_preinstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp5_enable(mdp5_kms);
	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
	mdp5_disable(mdp5_kms);
}

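/*
 * Once the irq is installed, register an error handler that reports
 * underruns on any of the four interfaces.
 */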
int mdp5_irq_postinstall(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct mdp_irq *error_handler = &mdp5_kms->error_handler;

	error_handler->irq = mdp5_irq_error_handler;
	error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
			MDP5_IRQ_INTF1_UNDER_RUN |
			MDP5_IRQ_INTF2_UNDER_RUN |
			MDP5_IRQ_INTF3_UNDER_RUN;

	mdp_irq_register(mdp_kms, error_handler);

	return 0;
}

void mdp5_irq_uninstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp5_enable(mdp5_kms);
	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
	mdp5_disable(mdp5_kms);
}

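/*
 * Handle the MDP core interrupt: read and ack the latched status bits,
 * dispatch any registered mdp_irq callbacks, then signal vblank on each
 * CRTC whose vblank bit is set in the status.
 */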
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status;

	status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);
}

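/*
 * Top-level interrupt handler: the MDP bit is handled locally, while any
 * remaining bits in HW_INTR_STATUS (hdmi/eDP/dsi) are forwarded to the irq
 * domain below so the owning sub-block drivers see them.
 */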
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	uint32_t intr;

	intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
		mdp5_irq_mdp(mdp_kms);
		intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
	}

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdp5_kms->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}

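/*
 * Enable/disable delivery of a CRTC's vblank interrupt by updating the
 * shared vblank mask maintained by the common mdp_kms code.
 */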
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), true);
	return 0;
}

void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), false);
}

/*
 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
 * can register to get their irqs delivered
 */

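/*
 * Hypothetical usage sketch (not part of this file): a sub-block driver
 * whose interrupt parent is this domain ends up with a Linux irq created
 * through the .map/.xlate callbacks below, e.g. resolved from a one-cell
 * devicetree interrupts property, or explicitly via something like
 * irq_create_mapping(mdp5_kms->irqcontroller.domain, hwirq).
 */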
#define VALID_IRQS  (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
		MDP5_HW_INTR_STATUS_INTR_DSI1 | \
		MDP5_HW_INTR_STATUS_INTR_HDMI | \
		MDP5_HW_INTR_STATUS_INTR_EDP)

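/*
 * irq_chip mask/unmask callbacks: atomically clear or set the line's bit
 * in the controller's enabled_mask.
 */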
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static void mdp5_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static struct irq_chip mdp5_hw_irq_chip = {
	.name		= "mdp5",
	.irq_mask	= mdp5_hw_mask_irq,
	.irq_unmask	= mdp5_hw_unmask_irq,
};

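/*
 * Only hwirqs covered by VALID_IRQS may be mapped; each mapped irq gets
 * the mdp5 irq_chip, a level-type flow handler, and the kms pointer as
 * chip data.
 */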
static int mdp5_hw_irqdomain_map(struct irq_domain *d,
		unsigned int irq, irq_hw_number_t hwirq)
{
	struct mdp5_kms *mdp5_kms = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_kms);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
	.map = mdp5_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

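/*
 * Create a 32-bit linear irq domain covering the HW_INTR_STATUS bits so
 * that sub-block interrupts can be routed through mdp5.
 */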
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
{
	struct device *dev = mdp5_kms->dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32,
			&mdp5_hw_irqdomain_ops, mdp5_kms);
	if (!d) {
		dev_err(dev, "mdp5 irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_kms->irqcontroller.enabled_mask = 0;
	mdp5_kms->irqcontroller.domain = d;

	return 0;
}

void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
{
	if (mdp5_kms->irqcontroller.domain) {
		irq_domain_remove(mdp5_kms->irqcontroller.domain);
		mdp5_kms->irqcontroller.domain = NULL;
	}
}