drivers/gpu/drm/i915/intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains, there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
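/*
 * A minimal usage sketch (illustrative only, not one of this file's code
 * paths): a caller that needs a display hardware block powered grabs a
 * reference for the matching abstract power domain and drops it when done.
 * This assumes the intel_display_power_get()/intel_display_power_put()
 * helpers used later in this file and an arbitrary example domain:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... program PIPE_A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 * The abstract domain is then mapped here to whichever power well(s) back
 * it on the running platform.
 */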
51
52 #define for_each_power_well(i, power_well, domain_mask, power_domains)  \
53         for (i = 0;                                                     \
54              i < (power_domains)->power_well_count &&                   \
55                  ((power_well) = &(power_domains)->power_wells[i]);     \
56              i++)                                                       \
57                 for_each_if ((power_well)->domains & (domain_mask))
58
59 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
60         for (i = (power_domains)->power_well_count - 1;                  \
61              i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
62              i--)                                                        \
63                 for_each_if ((power_well)->domains & (domain_mask))
64
65 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
66                                     int power_well_id);
67
68 static struct i915_power_well *
69 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
70
71 const char *
72 intel_display_power_domain_str(enum intel_display_power_domain domain)
73 {
74         switch (domain) {
75         case POWER_DOMAIN_PIPE_A:
76                 return "PIPE_A";
77         case POWER_DOMAIN_PIPE_B:
78                 return "PIPE_B";
79         case POWER_DOMAIN_PIPE_C:
80                 return "PIPE_C";
81         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
82                 return "PIPE_A_PANEL_FITTER";
83         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
84                 return "PIPE_B_PANEL_FITTER";
85         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
86                 return "PIPE_C_PANEL_FITTER";
87         case POWER_DOMAIN_TRANSCODER_A:
88                 return "TRANSCODER_A";
89         case POWER_DOMAIN_TRANSCODER_B:
90                 return "TRANSCODER_B";
91         case POWER_DOMAIN_TRANSCODER_C:
92                 return "TRANSCODER_C";
93         case POWER_DOMAIN_TRANSCODER_EDP:
94                 return "TRANSCODER_EDP";
95         case POWER_DOMAIN_TRANSCODER_DSI_A:
96                 return "TRANSCODER_DSI_A";
97         case POWER_DOMAIN_TRANSCODER_DSI_C:
98                 return "TRANSCODER_DSI_C";
99         case POWER_DOMAIN_PORT_DDI_A_LANES:
100                 return "PORT_DDI_A_LANES";
101         case POWER_DOMAIN_PORT_DDI_B_LANES:
102                 return "PORT_DDI_B_LANES";
103         case POWER_DOMAIN_PORT_DDI_C_LANES:
104                 return "PORT_DDI_C_LANES";
105         case POWER_DOMAIN_PORT_DDI_D_LANES:
106                 return "PORT_DDI_D_LANES";
107         case POWER_DOMAIN_PORT_DDI_E_LANES:
108                 return "PORT_DDI_E_LANES";
109         case POWER_DOMAIN_PORT_DSI:
110                 return "PORT_DSI";
111         case POWER_DOMAIN_PORT_CRT:
112                 return "PORT_CRT";
113         case POWER_DOMAIN_PORT_OTHER:
114                 return "PORT_OTHER";
115         case POWER_DOMAIN_VGA:
116                 return "VGA";
117         case POWER_DOMAIN_AUDIO:
118                 return "AUDIO";
119         case POWER_DOMAIN_PLLS:
120                 return "PLLS";
121         case POWER_DOMAIN_AUX_A:
122                 return "AUX_A";
123         case POWER_DOMAIN_AUX_B:
124                 return "AUX_B";
125         case POWER_DOMAIN_AUX_C:
126                 return "AUX_C";
127         case POWER_DOMAIN_AUX_D:
128                 return "AUX_D";
129         case POWER_DOMAIN_GMBUS:
130                 return "GMBUS";
131         case POWER_DOMAIN_INIT:
132                 return "INIT";
133         case POWER_DOMAIN_MODESET:
134                 return "MODESET";
135         default:
136                 MISSING_CASE(domain);
137                 return "?";
138         }
139 }
140
141 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
142                                     struct i915_power_well *power_well)
143 {
144         DRM_DEBUG_KMS("enabling %s\n", power_well->name);
145         power_well->ops->enable(dev_priv, power_well);
146         power_well->hw_enabled = true;
147 }
148
149 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
150                                      struct i915_power_well *power_well)
151 {
152         DRM_DEBUG_KMS("disabling %s\n", power_well->name);
153         power_well->hw_enabled = false;
154         power_well->ops->disable(dev_priv, power_well);
155 }
156
157 static void intel_power_well_get(struct drm_i915_private *dev_priv,
158                                  struct i915_power_well *power_well)
159 {
160         if (!power_well->count++)
161                 intel_power_well_enable(dev_priv, power_well);
162 }
163
164 static void intel_power_well_put(struct drm_i915_private *dev_priv,
165                                  struct i915_power_well *power_well)
166 {
167         WARN(!power_well->count, "Use count on power well %s is already zero",
168              power_well->name);
169
170         if (!--power_well->count)
171                 intel_power_well_disable(dev_priv, power_well);
172 }
173
174 /*
175  * We should only use the power well if we explicitly asked the hardware to
176  * enable it, so check if it's enabled and also check if we've requested it to
177  * be enabled.
178  */
179 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
180                                    struct i915_power_well *power_well)
181 {
182         return I915_READ(HSW_PWR_WELL_DRIVER) ==
183                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
184 }
185
186 /**
187  * __intel_display_power_is_enabled - unlocked check for a power domain
188  * @dev_priv: i915 device instance
189  * @domain: power domain to check
190  *
191  * This is the unlocked version of intel_display_power_is_enabled() and should
192  * only be used from error capture and recovery code where deadlocks are
193  * possible.
194  *
195  * Returns:
196  * True when the power domain is enabled, false otherwise.
197  */
198 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
199                                       enum intel_display_power_domain domain)
200 {
201         struct i915_power_domains *power_domains;
202         struct i915_power_well *power_well;
203         bool is_enabled;
204         int i;
205
206         if (dev_priv->pm.suspended)
207                 return false;
208
209         power_domains = &dev_priv->power_domains;
210
211         is_enabled = true;
212
213         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
214                 if (power_well->always_on)
215                         continue;
216
217                 if (!power_well->hw_enabled) {
218                         is_enabled = false;
219                         break;
220                 }
221         }
222
223         return is_enabled;
224 }
225
226 /**
227  * intel_display_power_is_enabled - check for a power domain
228  * @dev_priv: i915 device instance
229  * @domain: power domain to check
230  *
231  * This function can be used to check the hw power domain state. It is mostly
232  * used in hardware state readout functions. Everywhere else code should rely
233  * upon explicit power domain reference counting to ensure that the hardware
234  * block is powered up before accessing it.
235  *
236  * Callers must hold the relevant modesetting locks to ensure that concurrent
237  * threads can't disable the power well while the caller tries to read a few
238  * registers.
239  *
240  * Returns:
241  * True when the power domain is enabled, false otherwise.
242  */
243 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
244                                     enum intel_display_power_domain domain)
245 {
246         struct i915_power_domains *power_domains;
247         bool ret;
248
249         power_domains = &dev_priv->power_domains;
250
251         mutex_lock(&power_domains->lock);
252         ret = __intel_display_power_is_enabled(dev_priv, domain);
253         mutex_unlock(&power_domains->lock);
254
255         return ret;
256 }
257
258 /**
259  * intel_display_set_init_power - set the initial power domain state
260  * @dev_priv: i915 device instance
261  * @enable: whether to enable or disable the initial power domain state
262  *
263  * For simplicity our driver load/unload and system suspend/resume code assumes
264  * that all power domains are always enabled. This function controls the state
265  * of this little hack. While the initial power domain state is enabled, runtime
266  * pm is effectively disabled.
267  */
268 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
269                                   bool enable)
270 {
271         if (dev_priv->power_domains.init_power_on == enable)
272                 return;
273
274         if (enable)
275                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
276         else
277                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
278
279         dev_priv->power_domains.init_power_on = enable;
280 }
281
282 /*
283  * Starting with Haswell, we have a "Power Down Well" that can be turned off
284  * when not needed anymore. We have 4 registers that can request the power well
285  * to be enabled, and it will only be disabled if none of the registers is
286  * requesting it to be enabled.
287  */
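/*
 * A sketch of what the "4 registers" above refers to (assuming the HSW
 * register names used elsewhere in this file): BIOS, driver, KVMR and debug
 * each have their own request register, and the well stays powered while any
 * of them still has the request bit set.  Something along these lines:
 *
 *	u32 any_req = I915_READ(HSW_PWR_WELL_BIOS) |
 *		      I915_READ(HSW_PWR_WELL_DRIVER) |
 *		      I915_READ(HSW_PWR_WELL_KVMR) |
 *		      I915_READ(HSW_PWR_WELL_DEBUG);
 *	bool kept_on = any_req & HSW_PWR_WELL_ENABLE_REQUEST;
 */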
288 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
289 {
290         struct pci_dev *pdev = dev_priv->drm.pdev;
291
292         /*
293          * After we re-enable the power well, if we touch VGA register 0x3d5
294          * we'll get unclaimed register interrupts. This stops after we write
295          * anything to the VGA MSR register. The vgacon module uses this
296          * register all the time, so if we unbind our driver and, as a
297          * consequence, bind vgacon, we'll get stuck in an infinite loop at
298          * console_unlock(). So we touch the VGA MSR register here, making
299          * sure vgacon can keep working normally without triggering interrupts
300          * and error messages.
301          */
302         vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
303         outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
304         vga_put(pdev, VGA_RSRC_LEGACY_IO);
305
306         if (IS_BROADWELL(dev_priv))
307                 gen8_irq_power_well_post_enable(dev_priv,
308                                                 1 << PIPE_C | 1 << PIPE_B);
309 }
310
311 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
312 {
313         if (IS_BROADWELL(dev_priv))
314                 gen8_irq_power_well_pre_disable(dev_priv,
315                                                 1 << PIPE_C | 1 << PIPE_B);
316 }
317
318 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
319                                        struct i915_power_well *power_well)
320 {
321         struct pci_dev *pdev = dev_priv->drm.pdev;
322
323         /*
324          * After we re-enable the power well, if we touch VGA register 0x3d5
325          * we'll get unclaimed register interrupts. This stops after we write
326          * anything to the VGA MSR register. The vgacon module uses this
327          * register all the time, so if we unbind our driver and, as a
328          * consequence, bind vgacon, we'll get stuck in an infinite loop at
329          * console_unlock(). So we touch the VGA MSR register here, making
330          * sure vgacon can keep working normally without triggering interrupts
331          * and error messages.
332          */
333         if (power_well->id == SKL_DISP_PW_2) {
334                 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
335                 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
336                 vga_put(pdev, VGA_RSRC_LEGACY_IO);
337
338                 gen8_irq_power_well_post_enable(dev_priv,
339                                                 1 << PIPE_C | 1 << PIPE_B);
340         }
341 }
342
343 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
344                                        struct i915_power_well *power_well)
345 {
346         if (power_well->id == SKL_DISP_PW_2)
347                 gen8_irq_power_well_pre_disable(dev_priv,
348                                                 1 << PIPE_C | 1 << PIPE_B);
349 }
350
351 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
352                                struct i915_power_well *power_well, bool enable)
353 {
354         bool is_enabled, enable_requested;
355         uint32_t tmp;
356
357         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
358         is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
359         enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
360
361         if (enable) {
362                 if (!enable_requested)
363                         I915_WRITE(HSW_PWR_WELL_DRIVER,
364                                    HSW_PWR_WELL_ENABLE_REQUEST);
365
366                 if (!is_enabled) {
367                         DRM_DEBUG_KMS("Enabling power well\n");
368                         if (intel_wait_for_register(dev_priv,
369                                                     HSW_PWR_WELL_DRIVER,
370                                                     HSW_PWR_WELL_STATE_ENABLED,
371                                                     HSW_PWR_WELL_STATE_ENABLED,
372                                                     20))
373                                 DRM_ERROR("Timeout enabling power well\n");
374                         hsw_power_well_post_enable(dev_priv);
375                 }
376
377         } else {
378                 if (enable_requested) {
379                         hsw_power_well_pre_disable(dev_priv);
380                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
381                         POSTING_READ(HSW_PWR_WELL_DRIVER);
382                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
383                 }
384         }
385 }
386
387 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
388         BIT(POWER_DOMAIN_TRANSCODER_A) |                \
389         BIT(POWER_DOMAIN_PIPE_B) |                      \
390         BIT(POWER_DOMAIN_TRANSCODER_B) |                \
391         BIT(POWER_DOMAIN_PIPE_C) |                      \
392         BIT(POWER_DOMAIN_TRANSCODER_C) |                \
393         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
394         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
395         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
396         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
397         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
398         BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |            \
399         BIT(POWER_DOMAIN_AUX_B) |                       \
400         BIT(POWER_DOMAIN_AUX_C) |                       \
401         BIT(POWER_DOMAIN_AUX_D) |                       \
402         BIT(POWER_DOMAIN_AUDIO) |                       \
403         BIT(POWER_DOMAIN_VGA) |                         \
404         BIT(POWER_DOMAIN_INIT))
405 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (             \
406         BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
407         BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |            \
408         BIT(POWER_DOMAIN_INIT))
409 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (               \
410         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
411         BIT(POWER_DOMAIN_INIT))
412 #define SKL_DISPLAY_DDI_C_POWER_DOMAINS (               \
413         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
414         BIT(POWER_DOMAIN_INIT))
415 #define SKL_DISPLAY_DDI_D_POWER_DOMAINS (               \
416         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
417         BIT(POWER_DOMAIN_INIT))
418 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
419         SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
420         BIT(POWER_DOMAIN_MODESET) |                     \
421         BIT(POWER_DOMAIN_AUX_A) |                       \
422         BIT(POWER_DOMAIN_INIT))
423
424 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
425         BIT(POWER_DOMAIN_TRANSCODER_A) |                \
426         BIT(POWER_DOMAIN_PIPE_B) |                      \
427         BIT(POWER_DOMAIN_TRANSCODER_B) |                \
428         BIT(POWER_DOMAIN_PIPE_C) |                      \
429         BIT(POWER_DOMAIN_TRANSCODER_C) |                \
430         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
431         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
432         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
433         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
434         BIT(POWER_DOMAIN_AUX_B) |                       \
435         BIT(POWER_DOMAIN_AUX_C) |                       \
436         BIT(POWER_DOMAIN_AUDIO) |                       \
437         BIT(POWER_DOMAIN_VGA) |                         \
438         BIT(POWER_DOMAIN_GMBUS) |                       \
439         BIT(POWER_DOMAIN_INIT))
440 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
441         BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
442         BIT(POWER_DOMAIN_MODESET) |                     \
443         BIT(POWER_DOMAIN_AUX_A) |                       \
444         BIT(POWER_DOMAIN_INIT))
445 #define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
446         BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
447         BIT(POWER_DOMAIN_AUX_A) |                       \
448         BIT(POWER_DOMAIN_INIT))
449 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
450         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
451         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
452         BIT(POWER_DOMAIN_AUX_B) |                       \
453         BIT(POWER_DOMAIN_AUX_C) |                       \
454         BIT(POWER_DOMAIN_INIT))
455
456 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
457         BIT(POWER_DOMAIN_TRANSCODER_A) |                \
458         BIT(POWER_DOMAIN_PIPE_B) |                      \
459         BIT(POWER_DOMAIN_TRANSCODER_B) |                \
460         BIT(POWER_DOMAIN_PIPE_C) |                      \
461         BIT(POWER_DOMAIN_TRANSCODER_C) |                \
462         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
463         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
464         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
465         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
466         BIT(POWER_DOMAIN_AUX_B) |                       \
467         BIT(POWER_DOMAIN_AUX_C) |                       \
468         BIT(POWER_DOMAIN_AUDIO) |                       \
469         BIT(POWER_DOMAIN_VGA) |                         \
470         BIT(POWER_DOMAIN_INIT))
471 #define GLK_DISPLAY_DDI_A_POWER_DOMAINS (               \
472         BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
473         BIT(POWER_DOMAIN_INIT))
474 #define GLK_DISPLAY_DDI_B_POWER_DOMAINS (               \
475         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
476         BIT(POWER_DOMAIN_INIT))
477 #define GLK_DISPLAY_DDI_C_POWER_DOMAINS (               \
478         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
479         BIT(POWER_DOMAIN_INIT))
480 #define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
481         BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
482         BIT(POWER_DOMAIN_AUX_A) |                       \
483         BIT(POWER_DOMAIN_INIT))
484 #define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
485         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
486         BIT(POWER_DOMAIN_AUX_B) |                       \
487         BIT(POWER_DOMAIN_INIT))
488 #define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
489         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
490         BIT(POWER_DOMAIN_AUX_C) |                       \
491         BIT(POWER_DOMAIN_INIT))
492 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
493         BIT(POWER_DOMAIN_AUX_A) |               \
494         BIT(POWER_DOMAIN_INIT))
495 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
496         BIT(POWER_DOMAIN_AUX_B) |               \
497         BIT(POWER_DOMAIN_INIT))
498 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
499         BIT(POWER_DOMAIN_AUX_C) |               \
500         BIT(POWER_DOMAIN_INIT))
501 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
502         GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
503         BIT(POWER_DOMAIN_MODESET) |                     \
504         BIT(POWER_DOMAIN_AUX_A) |                       \
505         BIT(POWER_DOMAIN_INIT))
506
507 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
508 {
509         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
510                   "DC9 already programmed to be enabled.\n");
511         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
512                   "DC5 still not disabled to enable DC9.\n");
513         WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
514         WARN_ONCE(intel_irqs_enabled(dev_priv),
515                   "Interrupts not disabled yet.\n");
516
517          /*
518           * TODO: check for the following to verify the conditions to enter DC9
519           * state are satisfied:
520           * 1] Check relevant display engine registers to verify if mode set
521           * disable sequence was followed.
522           * 2] Check if the display uninitialize sequence has been executed.
523           */
524 }
525
526 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
527 {
528         WARN_ONCE(intel_irqs_enabled(dev_priv),
529                   "Interrupts not disabled yet.\n");
530         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
531                   "DC5 still not disabled.\n");
532
533          /*
534           * TODO: check for the following to verify DC9 state was indeed
535           * entered before programming to disable it:
536           * 1] Check relevant display engine registers to verify if mode
537           *  set disable sequence was followed.
538           * 2] Check if the display uninitialize sequence has been executed.
539           */
540 }
541
542 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
543                                 u32 state)
544 {
545         int rewrites = 0;
546         int rereads = 0;
547         u32 v;
548
549         I915_WRITE(DC_STATE_EN, state);
550
551         /* It has been observed that disabling the DC6 state sometimes
552          * doesn't stick and the DMC keeps returning the old value. Make sure
553          * the write really sticks by re-reading it enough times, and force a
554          * rewrite until we are confident that the state is exactly what we want.
555          */
556         do  {
557                 v = I915_READ(DC_STATE_EN);
558
559                 if (v != state) {
560                         I915_WRITE(DC_STATE_EN, state);
561                         rewrites++;
562                         rereads = 0;
563                 } else if (rereads++ > 5) {
564                         break;
565                 }
566
567         } while (rewrites < 100);
568
569         if (v != state)
570                 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
571                           state, v);
572
573         /* Most of the time we need only one retry, so avoid spamming the log */
574         if (rewrites > 1)
575                 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
576                               state, rewrites);
577 }
578
579 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
580 {
581         u32 mask;
582
583         mask = DC_STATE_EN_UPTO_DC5;
584         if (IS_GEN9_LP(dev_priv))
585                 mask |= DC_STATE_EN_DC9;
586         else
587                 mask |= DC_STATE_EN_UPTO_DC6;
588
589         return mask;
590 }
591
592 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
593 {
594         u32 val;
595
596         val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
597
598         DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
599                       dev_priv->csr.dc_state, val);
600         dev_priv->csr.dc_state = val;
601 }
602
603 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
604 {
605         uint32_t val;
606         uint32_t mask;
607
608         if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
609                 state &= dev_priv->csr.allowed_dc_mask;
610
611         val = I915_READ(DC_STATE_EN);
612         mask = gen9_dc_mask(dev_priv);
613         DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
614                       val & mask, state);
615
616         /* Check if DMC is ignoring our DC state requests */
617         if ((val & mask) != dev_priv->csr.dc_state)
618                 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
619                           dev_priv->csr.dc_state, val & mask);
620
621         val &= ~mask;
622         val |= state;
623
624         gen9_write_dc_state(dev_priv, val);
625
626         dev_priv->csr.dc_state = val & mask;
627 }
628
629 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
630 {
631         assert_can_enable_dc9(dev_priv);
632
633         DRM_DEBUG_KMS("Enabling DC9\n");
634
635         intel_power_sequencer_reset(dev_priv);
636         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
637 }
638
639 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
640 {
641         assert_can_disable_dc9(dev_priv);
642
643         DRM_DEBUG_KMS("Disabling DC9\n");
644
645         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
646
647         intel_pps_unlock_regs_wa(dev_priv);
648 }
649
650 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
651 {
652         WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
653                   "CSR program storage start is NULL\n");
654         WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
655         WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
656 }
657
658 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
659 {
660         bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
661                                         SKL_DISP_PW_2);
662
663         WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
664
665         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
666                   "DC5 already programmed to be enabled.\n");
667         assert_rpm_wakelock_held(dev_priv);
668
669         assert_csr_loaded(dev_priv);
670 }
671
672 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
673 {
674         assert_can_enable_dc5(dev_priv);
675
676         DRM_DEBUG_KMS("Enabling DC5\n");
677
678         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
679 }
680
681 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
682 {
683         WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
684                   "Backlight is not disabled.\n");
685         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
686                   "DC6 already programmed to be enabled.\n");
687
688         assert_csr_loaded(dev_priv);
689 }
690
691 void skl_enable_dc6(struct drm_i915_private *dev_priv)
692 {
693         assert_can_enable_dc6(dev_priv);
694
695         DRM_DEBUG_KMS("Enabling DC6\n");
696
697         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
698
699 }
700
701 void skl_disable_dc6(struct drm_i915_private *dev_priv)
702 {
703         DRM_DEBUG_KMS("Disabling DC6\n");
704
705         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
706 }
707
708 static void
709 gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
710                                   struct i915_power_well *power_well)
711 {
712         enum skl_disp_power_wells power_well_id = power_well->id;
713         u32 val;
714         u32 mask;
715
716         mask = SKL_POWER_WELL_REQ(power_well_id);
717
718         val = I915_READ(HSW_PWR_WELL_KVMR);
719         if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
720                       power_well->name))
721                 I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
722
723         val = I915_READ(HSW_PWR_WELL_BIOS);
724         val |= I915_READ(HSW_PWR_WELL_DEBUG);
725
726         if (!(val & mask))
727                 return;
728
729         /*
730          * DMC is known to force on the request bits for power well 1 on SKL
731          * and BXT and the misc IO power well on SKL but we don't expect any
732          * other request bits to be set, so WARN for those.
733          */
734         if (power_well_id == SKL_DISP_PW_1 ||
735             ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
736              power_well_id == SKL_DISP_PW_MISC_IO))
737                 DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
738                                  "by DMC\n", power_well->name);
739         else
740                 WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
741                           power_well->name);
742
743         I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
744         I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
745 }
746
747 static void skl_set_power_well(struct drm_i915_private *dev_priv,
748                                struct i915_power_well *power_well, bool enable)
749 {
750         uint32_t tmp, fuse_status;
751         uint32_t req_mask, state_mask;
752         bool is_enabled, enable_requested, check_fuse_status = false;
753
754         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
755         fuse_status = I915_READ(SKL_FUSE_STATUS);
756
757         switch (power_well->id) {
758         case SKL_DISP_PW_1:
759                 if (intel_wait_for_register(dev_priv,
760                                             SKL_FUSE_STATUS,
761                                             SKL_FUSE_PG0_DIST_STATUS,
762                                             SKL_FUSE_PG0_DIST_STATUS,
763                                             1)) {
764                         DRM_ERROR("PG0 not enabled\n");
765                         return;
766                 }
767                 break;
768         case SKL_DISP_PW_2:
769                 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
770                         DRM_ERROR("PG1 in disabled state\n");
771                         return;
772                 }
773                 break;
774         case SKL_DISP_PW_MISC_IO:
775         case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
776         case SKL_DISP_PW_DDI_B:
777         case SKL_DISP_PW_DDI_C:
778         case SKL_DISP_PW_DDI_D:
779         case GLK_DISP_PW_AUX_A:
780         case GLK_DISP_PW_AUX_B:
781         case GLK_DISP_PW_AUX_C:
782                 break;
783         default:
784                 WARN(1, "Unknown power well %lu\n", power_well->id);
785                 return;
786         }
787
788         req_mask = SKL_POWER_WELL_REQ(power_well->id);
789         enable_requested = tmp & req_mask;
790         state_mask = SKL_POWER_WELL_STATE(power_well->id);
791         is_enabled = tmp & state_mask;
792
793         if (!enable && enable_requested)
794                 skl_power_well_pre_disable(dev_priv, power_well);
795
796         if (enable) {
797                 if (!enable_requested) {
798                         WARN((tmp & state_mask) &&
799                                 !I915_READ(HSW_PWR_WELL_BIOS),
800                                 "Invalid for power well status to be enabled, unless done by the BIOS, "
801                                 "when request is to disable!\n");
802                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
803                 }
804
805                 if (!is_enabled) {
806                         DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
807                         check_fuse_status = true;
808                 }
809         } else {
810                 if (enable_requested) {
811                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
812                         POSTING_READ(HSW_PWR_WELL_DRIVER);
813                         DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
814                 }
815
816                 if (IS_GEN9(dev_priv))
817                         gen9_sanitize_power_well_requests(dev_priv, power_well);
818         }
819
820         if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
821                      1))
822                 DRM_ERROR("%s %s timeout\n",
823                           power_well->name, enable ? "enable" : "disable");
824
825         if (check_fuse_status) {
826                 if (power_well->id == SKL_DISP_PW_1) {
827                         if (intel_wait_for_register(dev_priv,
828                                                     SKL_FUSE_STATUS,
829                                                     SKL_FUSE_PG1_DIST_STATUS,
830                                                     SKL_FUSE_PG1_DIST_STATUS,
831                                                     1))
832                                 DRM_ERROR("PG1 distributing status timeout\n");
833                 } else if (power_well->id == SKL_DISP_PW_2) {
834                         if (intel_wait_for_register(dev_priv,
835                                                     SKL_FUSE_STATUS,
836                                                     SKL_FUSE_PG2_DIST_STATUS,
837                                                     SKL_FUSE_PG2_DIST_STATUS,
838                                                     1))
839                                 DRM_ERROR("PG2 distributing status timeout\n");
840                 }
841         }
842
843         if (enable && !is_enabled)
844                 skl_power_well_post_enable(dev_priv, power_well);
845 }
846
847 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
848                                    struct i915_power_well *power_well)
849 {
850         hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
851
852         /*
853          * We're taking over the BIOS, so clear any requests made by it since
854          * the driver is in charge now.
855          */
856         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
857                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
858 }
859
860 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
861                                   struct i915_power_well *power_well)
862 {
863         hsw_set_power_well(dev_priv, power_well, true);
864 }
865
866 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
867                                    struct i915_power_well *power_well)
868 {
869         hsw_set_power_well(dev_priv, power_well, false);
870 }
871
872 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
873                                         struct i915_power_well *power_well)
874 {
875         uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
876                 SKL_POWER_WELL_STATE(power_well->id);
877
878         return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
879 }
880
881 static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
882                                 struct i915_power_well *power_well)
883 {
884         skl_set_power_well(dev_priv, power_well, power_well->count > 0);
885
886         /* Clear any request made by BIOS as driver is taking over */
887         I915_WRITE(HSW_PWR_WELL_BIOS, 0);
888 }
889
890 static void skl_power_well_enable(struct drm_i915_private *dev_priv,
891                                 struct i915_power_well *power_well)
892 {
893         skl_set_power_well(dev_priv, power_well, true);
894 }
895
896 static void skl_power_well_disable(struct drm_i915_private *dev_priv,
897                                 struct i915_power_well *power_well)
898 {
899         skl_set_power_well(dev_priv, power_well, false);
900 }
901
902 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
903                                            struct i915_power_well *power_well)
904 {
905         bxt_ddi_phy_init(dev_priv, power_well->data);
906 }
907
908 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
909                                             struct i915_power_well *power_well)
910 {
911         bxt_ddi_phy_uninit(dev_priv, power_well->data);
912 }
913
914 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
915                                             struct i915_power_well *power_well)
916 {
917         return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
918 }
919
920 static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
921                                             struct i915_power_well *power_well)
922 {
923         if (power_well->count > 0)
924                 bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
925         else
926                 bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
927 }
928
929
930 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
931 {
932         struct i915_power_well *power_well;
933
934         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
935         if (power_well->count > 0)
936                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
937
938         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
939         if (power_well->count > 0)
940                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
941
942         if (IS_GEMINILAKE(dev_priv)) {
943                 power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
944                 if (power_well->count > 0)
945                         bxt_ddi_phy_verify_state(dev_priv, power_well->data);
946         }
947 }
948
949 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
950                                            struct i915_power_well *power_well)
951 {
952         return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
953 }
954
955 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
956 {
957         u32 tmp = I915_READ(DBUF_CTL);
958
959         WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
960              (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
961              "Unexpected DBuf power state (0x%08x)\n", tmp);
962 }
963
964 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
965                                           struct i915_power_well *power_well)
966 {
967         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
968
969         WARN_ON(dev_priv->cdclk_freq !=
970                 dev_priv->display.get_display_clock_speed(dev_priv));
971
972         gen9_assert_dbuf_enabled(dev_priv);
973
974         if (IS_GEN9_LP(dev_priv))
975                 bxt_verify_ddi_phy_power_wells(dev_priv);
976 }
977
978 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
979                                            struct i915_power_well *power_well)
980 {
981         if (!dev_priv->csr.dmc_payload)
982                 return;
983
984         if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
985                 skl_enable_dc6(dev_priv);
986         else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
987                 gen9_enable_dc5(dev_priv);
988 }
989
990 static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
991                                            struct i915_power_well *power_well)
992 {
993         if (power_well->count > 0)
994                 gen9_dc_off_power_well_enable(dev_priv, power_well);
995         else
996                 gen9_dc_off_power_well_disable(dev_priv, power_well);
997 }
998
999 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1000                                            struct i915_power_well *power_well)
1001 {
1002 }
1003
1004 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1005                                              struct i915_power_well *power_well)
1006 {
1007         return true;
1008 }
1009
1010 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1011                                struct i915_power_well *power_well, bool enable)
1012 {
1013         enum punit_power_well power_well_id = power_well->id;
1014         u32 mask;
1015         u32 state;
1016         u32 ctrl;
1017
1018         mask = PUNIT_PWRGT_MASK(power_well_id);
1019         state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
1020                          PUNIT_PWRGT_PWR_GATE(power_well_id);
1021
1022         mutex_lock(&dev_priv->rps.hw_lock);
1023
1024 #define COND \
1025         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1026
1027         if (COND)
1028                 goto out;
1029
1030         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1031         ctrl &= ~mask;
1032         ctrl |= state;
1033         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1034
1035         if (wait_for(COND, 100))
1036                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1037                           state,
1038                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1039
1040 #undef COND
1041
1042 out:
1043         mutex_unlock(&dev_priv->rps.hw_lock);
1044 }
1045
1046 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
1047                                    struct i915_power_well *power_well)
1048 {
1049         vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
1050 }
1051
1052 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1053                                   struct i915_power_well *power_well)
1054 {
1055         vlv_set_power_well(dev_priv, power_well, true);
1056 }
1057
1058 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1059                                    struct i915_power_well *power_well)
1060 {
1061         vlv_set_power_well(dev_priv, power_well, false);
1062 }
1063
1064 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1065                                    struct i915_power_well *power_well)
1066 {
1067         int power_well_id = power_well->id;
1068         bool enabled = false;
1069         u32 mask;
1070         u32 state;
1071         u32 ctrl;
1072
1073         mask = PUNIT_PWRGT_MASK(power_well_id);
1074         ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
1075
1076         mutex_lock(&dev_priv->rps.hw_lock);
1077
1078         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1079         /*
1080          * We only ever set the power-on and power-gate states, anything
1081          * else is unexpected.
1082          */
1083         WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
1084                 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
1085         if (state == ctrl)
1086                 enabled = true;
1087
1088         /*
1089          * A transient state at this point would mean some unexpected party
1090          * is poking at the power controls too.
1091          */
1092         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1093         WARN_ON(ctrl != state);
1094
1095         mutex_unlock(&dev_priv->rps.hw_lock);
1096
1097         return enabled;
1098 }
1099
1100 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1101 {
1102         u32 val;
1103
1104         /*
1105          * On driver load, a pipe may be active and driving a DSI display.
1106          * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1107          * (and never recovering) in this case. intel_dsi_post_disable() will
1108          * clear it when we turn off the display.
1109          */
1110         val = I915_READ(DSPCLK_GATE_D);
1111         val &= DPOUNIT_CLOCK_GATE_DISABLE;
1112         val |= VRHUNIT_CLOCK_GATE_DISABLE;
1113         I915_WRITE(DSPCLK_GATE_D, val);
1114
1115         /*
1116          * Disable trickle feed and enable pnd deadline calculation
1117          */
1118         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1119         I915_WRITE(CBR1_VLV, 0);
1120
1121         WARN_ON(dev_priv->rawclk_freq == 0);
1122
1123         I915_WRITE(RAWCLK_FREQ_VLV,
1124                    DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1125 }
1126
1127 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1128 {
1129         struct intel_encoder *encoder;
1130         enum pipe pipe;
1131
1132         /*
1133          * Enable the CRI clock source so we can get at the
1134          * display and the reference clock for VGA
1135          * hotplug / manual detection. Supposedly DSI also
1136          * needs the ref clock up and running.
1137          *
1138          * CHV DPLL B/C have some issues if VGA mode is enabled.
1139          */
1140         for_each_pipe(dev_priv, pipe) {
1141                 u32 val = I915_READ(DPLL(pipe));
1142
1143                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1144                 if (pipe != PIPE_A)
1145                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1146
1147                 I915_WRITE(DPLL(pipe), val);
1148         }
1149
1150         vlv_init_display_clock_gating(dev_priv);
1151
1152         spin_lock_irq(&dev_priv->irq_lock);
1153         valleyview_enable_display_irqs(dev_priv);
1154         spin_unlock_irq(&dev_priv->irq_lock);
1155
1156         /*
1157          * During driver initialization/resume we can avoid restoring the
1158          * part of the HW/SW state that will be explicitly initialized anyway.
1159          */
1160         if (dev_priv->power_domains.initializing)
1161                 return;
1162
1163         intel_hpd_init(dev_priv);
1164
1165         /* Re-enable the ADPA, if we have one */
1166         for_each_intel_encoder(&dev_priv->drm, encoder) {
1167                 if (encoder->type == INTEL_OUTPUT_ANALOG)
1168                         intel_crt_reset(&encoder->base);
1169         }
1170
1171         i915_redisable_vga_power_on(dev_priv);
1172
1173         intel_pps_unlock_regs_wa(dev_priv);
1174 }
1175
1176 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1177 {
1178         spin_lock_irq(&dev_priv->irq_lock);
1179         valleyview_disable_display_irqs(dev_priv);
1180         spin_unlock_irq(&dev_priv->irq_lock);
1181
1182         /* make sure we're done processing display irqs */
1183         synchronize_irq(dev_priv->drm.irq);
1184
1185         intel_power_sequencer_reset(dev_priv);
1186
1187         /* Prevent us from re-enabling polling by accident in late suspend */
1188         if (!dev_priv->drm.dev->power.is_suspended)
1189                 intel_hpd_poll_init(dev_priv);
1190 }
1191
1192 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1193                                           struct i915_power_well *power_well)
1194 {
1195         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1196
1197         vlv_set_power_well(dev_priv, power_well, true);
1198
1199         vlv_display_power_well_init(dev_priv);
1200 }
1201
1202 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1203                                            struct i915_power_well *power_well)
1204 {
1205         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1206
1207         vlv_display_power_well_deinit(dev_priv);
1208
1209         vlv_set_power_well(dev_priv, power_well, false);
1210 }
1211
1212 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1213                                            struct i915_power_well *power_well)
1214 {
1215         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1216
1217         /* since ref/cri clock was enabled */
1218         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1219
1220         vlv_set_power_well(dev_priv, power_well, true);
1221
1222         /*
1223          * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1224          *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1225          *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1226          *   b. The other bits such as sfr settings / modesel may all
1227          *      be set to 0.
1228          *
1229          * This should only be done on init and resume from S3 with
1230          * both PLLs disabled, or we risk losing DPIO and PLL
1231          * synchronization.
1232          */
1233         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1234 }
1235
1236 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1237                                             struct i915_power_well *power_well)
1238 {
1239         enum pipe pipe;
1240
1241         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1242
1243         for_each_pipe(dev_priv, pipe)
1244                 assert_pll_disabled(dev_priv, pipe);
1245
1246         /* Assert common reset */
1247         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1248
1249         vlv_set_power_well(dev_priv, power_well, false);
1250 }
1251
1252 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
1253
1254 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1255                                                  int power_well_id)
1256 {
1257         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1258         int i;
1259
1260         for (i = 0; i < power_domains->power_well_count; i++) {
1261                 struct i915_power_well *power_well;
1262
1263                 power_well = &power_domains->power_wells[i];
1264                 if (power_well->id == power_well_id)
1265                         return power_well;
1266         }
1267
1268         return NULL;
1269 }
1270
1271 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1272
1273 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1274 {
1275         struct i915_power_well *cmn_bc =
1276                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1277         struct i915_power_well *cmn_d =
1278                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1279         u32 phy_control = dev_priv->chv_phy_control;
1280         u32 phy_status = 0;
1281         u32 phy_status_mask = 0xffffffff;
1282
1283         /*
1284          * The BIOS can leave the PHY in some weird state
1285          * where it doesn't fully power down some parts.
1286          * Disable the asserts until the PHY has been fully
1287          * reset (ie. the power well has been disabled at
1288          * least once).
1289          */
1290         if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1291                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1292                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1293                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1294                                      PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1295                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1296                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1297
1298         if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1299                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1300                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1301                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1302
1303         if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1304                 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1305
1306                 /* this assumes override is only used to enable lanes */
1307                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1308                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1309
1310                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1311                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1312
1313                 /* CL1 is on whenever anything is on in either channel */
1314                 if (BITS_SET(phy_control,
1315                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1316                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1317                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1318
1319                 /*
1320                  * The DPLLB check accounts for the pipe B + port A usage
1321                  * with CL2 powered up but all the lanes in the second channel
1322                  * powered down.
1323                  */
1324                 if (BITS_SET(phy_control,
1325                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1326                     (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1327                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1328
1329                 if (BITS_SET(phy_control,
1330                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1331                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1332                 if (BITS_SET(phy_control,
1333                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1334                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1335
1336                 if (BITS_SET(phy_control,
1337                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1338                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1339                 if (BITS_SET(phy_control,
1340                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1341                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1342         }
1343
1344         if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1345                 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1346
1347                 /* this assumes override is only used to enable lanes */
1348                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1349                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1350
1351                 if (BITS_SET(phy_control,
1352                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1353                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1354
1355                 if (BITS_SET(phy_control,
1356                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1357                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1358                 if (BITS_SET(phy_control,
1359                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1360                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1361         }
1362
1363         phy_status &= phy_status_mask;
1364
1365         /*
1366          * The PHY may be busy with some initial calibration and whatnot,
1367          * so the power state can take a while to actually change.
1368          */
1369         if (intel_wait_for_register(dev_priv,
1370                                     DISPLAY_PHY_STATUS,
1371                                     phy_status_mask,
1372                                     phy_status,
1373                                     10))
1374                 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1375                           I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1376                           phy_status, dev_priv->chv_phy_control);
1377 }
1378
1379 #undef BITS_SET
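
/*
 * A note on the override masks used above (descriptive only, inferred from
 * the checks themselves): PHY_CH_POWER_DOWN_OVRD() takes a 4-bit lane mask
 * per channel, and the status reconstruction assumes that lanes 0-1 (mask
 * 0x3) feed spline LDO 0 while lanes 2-3 (mask 0xc) feed spline LDO 1, with
 * 0xf covering the whole channel.
 */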
1380
1381 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1382                                            struct i915_power_well *power_well)
1383 {
1384         enum dpio_phy phy;
1385         enum pipe pipe;
1386         uint32_t tmp;
1387
1388         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1389                      power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1390
1391         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1392                 pipe = PIPE_A;
1393                 phy = DPIO_PHY0;
1394         } else {
1395                 pipe = PIPE_C;
1396                 phy = DPIO_PHY1;
1397         }
1398
1399         /* since ref/cri clock was enabled */
1400         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1401         vlv_set_power_well(dev_priv, power_well, true);
1402
1403         /* Poll for phypwrgood signal */
1404         if (intel_wait_for_register(dev_priv,
1405                                     DISPLAY_PHY_STATUS,
1406                                     PHY_POWERGOOD(phy),
1407                                     PHY_POWERGOOD(phy),
1408                                     1))
1409                 DRM_ERROR("Display PHY %d is not powered up\n", phy);
1410
1411         mutex_lock(&dev_priv->sb_lock);
1412
1413         /* Enable dynamic power down */
1414         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1415         tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1416                 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1417         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1418
1419         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1420                 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1421                 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1422                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1423         } else {
1424                 /*
1425                  * Force the non-existing CL2 off. BXT does this
1426                  * too, so maybe it saves some power even though
1427                  * CL2 doesn't exist?
1428                  */
1429                 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1430                 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1431                 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1432         }
1433
1434         mutex_unlock(&dev_priv->sb_lock);
1435
1436         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1437         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1438
1439         DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1440                       phy, dev_priv->chv_phy_control);
1441
1442         assert_chv_phy_status(dev_priv);
1443 }
1444
1445 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1446                                             struct i915_power_well *power_well)
1447 {
1448         enum dpio_phy phy;
1449
1450         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1451                      power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1452
1453         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1454                 phy = DPIO_PHY0;
1455                 assert_pll_disabled(dev_priv, PIPE_A);
1456                 assert_pll_disabled(dev_priv, PIPE_B);
1457         } else {
1458                 phy = DPIO_PHY1;
1459                 assert_pll_disabled(dev_priv, PIPE_C);
1460         }
1461
1462         dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1463         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1464
1465         vlv_set_power_well(dev_priv, power_well, false);
1466
1467         DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1468                       phy, dev_priv->chv_phy_control);
1469
1470         /* PHY is fully reset now, so we can enable the PHY state asserts */
1471         dev_priv->chv_phy_assert[phy] = true;
1472
1473         assert_chv_phy_status(dev_priv);
1474 }
1475
1476 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1477                                      enum dpio_channel ch, bool override, unsigned int mask)
1478 {
1479         enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1480         u32 reg, val, expected, actual;
1481
1482         /*
1483          * The BIOS can leave the PHY is some weird state
1484          * The BIOS can leave the PHY in some weird state
1485          * Disable the asserts until the PHY has been fully
1486          * reset (ie. the power well has been disabled at
1487          * least once).
1488          */
1489         if (!dev_priv->chv_phy_assert[phy])
1490                 return;
1491
1492         if (ch == DPIO_CH0)
1493                 reg = _CHV_CMN_DW0_CH0;
1494         else
1495                 reg = _CHV_CMN_DW6_CH1;
1496
1497         mutex_lock(&dev_priv->sb_lock);
1498         val = vlv_dpio_read(dev_priv, pipe, reg);
1499         mutex_unlock(&dev_priv->sb_lock);
1500
1501         /*
1502          * This assumes !override is only used when the port is disabled.
1503          * All lanes should power down even without the override when
1504          * the port is disabled.
1505          */
1506         if (!override || mask == 0xf) {
1507                 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1508                 /*
1509                  * If CH1 common lane is not active anymore
1510                  * (eg. for pipe B DPLL) the entire channel will
1511                  * shut down, which causes the common lane registers
1512                  * to read as 0. That means we can't actually check
1513                  * the lane power down status bits, but as the entire
1514                  * register reads as 0 it's a good indication that the
1515                  * channel is indeed entirely powered down.
1516                  */
1517                 if (ch == DPIO_CH1 && val == 0)
1518                         expected = 0;
1519         } else if (mask != 0x0) {
1520                 expected = DPIO_ANYDL_POWERDOWN;
1521         } else {
1522                 expected = 0;
1523         }
1524
1525         if (ch == DPIO_CH0)
1526                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1527         else
1528                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1529         actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1530
1531         WARN(actual != expected,
1532              "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1533              !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1534              !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1535              reg, val);
1536 }
1537
1538 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1539                           enum dpio_channel ch, bool override)
1540 {
1541         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1542         bool was_override;
1543
1544         mutex_lock(&power_domains->lock);
1545
1546         was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1547
1548         if (override == was_override)
1549                 goto out;
1550
1551         if (override)
1552                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1553         else
1554                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1555
1556         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1557
1558         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1559                       phy, ch, dev_priv->chv_phy_control);
1560
1561         assert_chv_phy_status(dev_priv);
1562
1563 out:
1564         mutex_unlock(&power_domains->lock);
1565
1566         return was_override;
1567 }
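
/*
 * The previous override state is returned so that a caller can wrap a
 * temporary override in a save/restore pair. A minimal sketch (illustrative
 * only, not lifted from an actual call site):
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
 *	... do work that requires the power down override ...
 *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
 */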
1568
1569 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1570                              bool override, unsigned int mask)
1571 {
1572         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1573         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1574         enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1575         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1576
1577         mutex_lock(&power_domains->lock);
1578
1579         dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1580         dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1581
1582         if (override)
1583                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1584         else
1585                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1586
1587         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1588
1589         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1590                       phy, ch, mask, dev_priv->chv_phy_control);
1591
1592         assert_chv_phy_status(dev_priv);
1593
1594         assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1595
1596         mutex_unlock(&power_domains->lock);
1597 }
1598
1599 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1600                                         struct i915_power_well *power_well)
1601 {
1602         enum pipe pipe = power_well->id;
1603         bool enabled;
1604         u32 state, ctrl;
1605
1606         mutex_lock(&dev_priv->rps.hw_lock);
1607
1608         state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1609         /*
1610          * We only ever set the power-on and power-gate states, anything
1611          * else is unexpected.
1612          */
1613         WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1614         enabled = state == DP_SSS_PWR_ON(pipe);
1615
1616         /*
1617          * A transient state at this point would mean some unexpected party
1618          * is poking at the power controls too.
1619          */
1620         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1621         WARN_ON(ctrl << 16 != state);
1622
1623         mutex_unlock(&dev_priv->rps.hw_lock);
1624
1625         return enabled;
1626 }
1627
1628 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1629                                     struct i915_power_well *power_well,
1630                                     bool enable)
1631 {
1632         enum pipe pipe = power_well->id;
1633         u32 state;
1634         u32 ctrl;
1635
1636         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1637
1638         mutex_lock(&dev_priv->rps.hw_lock);
1639
1640 #define COND \
1641         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1642
1643         if (COND)
1644                 goto out;
1645
1646         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1647         ctrl &= ~DP_SSC_MASK(pipe);
1648         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1649         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1650
1651         if (wait_for(COND, 100))
1652                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1653                           state,
1654                           vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1655
1656 #undef COND
1657
1658 out:
1659         mutex_unlock(&dev_priv->rps.hw_lock);
1660 }
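
/*
 * Note on the sequence above: the requested state is written into the
 * DP_SSC (control) bits of PUNIT_REG_DSPFREQ and completion is detected by
 * polling the DP_SSS (status) bits, which end up equal to the control bits
 * shifted up by 16 once the Punit has acted (see the ctrl << 16 check in
 * chv_pipe_power_well_enabled() above).
 */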
1661
1662 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1663                                         struct i915_power_well *power_well)
1664 {
1665         WARN_ON_ONCE(power_well->id != PIPE_A);
1666
1667         chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
1668 }
1669
1670 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1671                                        struct i915_power_well *power_well)
1672 {
1673         WARN_ON_ONCE(power_well->id != PIPE_A);
1674
1675         chv_set_pipe_power_well(dev_priv, power_well, true);
1676
1677         vlv_display_power_well_init(dev_priv);
1678 }
1679
1680 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1681                                         struct i915_power_well *power_well)
1682 {
1683         WARN_ON_ONCE(power_well->id != PIPE_A);
1684
1685         vlv_display_power_well_deinit(dev_priv);
1686
1687         chv_set_pipe_power_well(dev_priv, power_well, false);
1688 }
1689
1690 static void
1691 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1692                                  enum intel_display_power_domain domain)
1693 {
1694         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1695         struct i915_power_well *power_well;
1696         int i;
1697
1698         for_each_power_well(i, power_well, BIT(domain), power_domains)
1699                 intel_power_well_get(dev_priv, power_well);
1700
1701         power_domains->domain_use_count[domain]++;
1702 }
1703
1704 /**
1705  * intel_display_power_get - grab a power domain reference
1706  * @dev_priv: i915 device instance
1707  * @domain: power domain to reference
1708  *
1709  * This function grabs a power domain reference for @domain and ensures that the
1710  * power domain and all its parents are powered up. Therefore users should only
1711  * grab a reference to the innermost power domain they need.
1712  *
1713  * Any power domain reference obtained by this function must have a symmetric
1714  * call to intel_display_power_put() to release the reference again.
1715  */
1716 void intel_display_power_get(struct drm_i915_private *dev_priv,
1717                              enum intel_display_power_domain domain)
1718 {
1719         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1720
1721         intel_runtime_pm_get(dev_priv);
1722
1723         mutex_lock(&power_domains->lock);
1724
1725         __intel_display_power_get_domain(dev_priv, domain);
1726
1727         mutex_unlock(&power_domains->lock);
1728 }
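
/*
 * A minimal usage sketch (illustrative only; the domain is an arbitrary
 * example, any enum intel_display_power_domain value works the same way):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... access hardware that sits behind the VGA power domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 *
 * References are counted per domain and are never dropped automatically,
 * so each get must be matched by exactly one put.
 */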
1729
1730 /**
1731  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1732  * @dev_priv: i915 device instance
1733  * @domain: power domain to reference
1734  *
1735  * This function grabs a power domain reference for @domain only if the domain
1736  * is already enabled, and in that case ensures that it and all its parents
1737  * stay powered up; it returns true if a reference was acquired, false otherwise.
1738  * As with intel_display_power_get(), only the innermost power domain needed
1739  * should be referenced. Any power domain reference obtained by this function
1740  * must have a symmetric call to intel_display_power_put() to release it again.
1741  */
1742 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1743                                         enum intel_display_power_domain domain)
1744 {
1745         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1746         bool is_enabled;
1747
1748         if (!intel_runtime_pm_get_if_in_use(dev_priv))
1749                 return false;
1750
1751         mutex_lock(&power_domains->lock);
1752
1753         if (__intel_display_power_is_enabled(dev_priv, domain)) {
1754                 __intel_display_power_get_domain(dev_priv, domain);
1755                 is_enabled = true;
1756         } else {
1757                 is_enabled = false;
1758         }
1759
1760         mutex_unlock(&power_domains->lock);
1761
1762         if (!is_enabled)
1763                 intel_runtime_pm_put(dev_priv);
1764
1765         return is_enabled;
1766 }
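
/*
 * Illustrative pattern for the conditional variant (sketch only, the domain
 * is again just an example):
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read state that is only valid while the domain is powered ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 *
 * When false is returned no reference was taken, so no matching put is
 * needed (or allowed).
 */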
1767
1768 /**
1769  * intel_display_power_put - release a power domain reference
1770  * @dev_priv: i915 device instance
1771  * @domain: power domain to reference
1772  *
1773  * This function drops the power domain reference obtained by
1774  * intel_display_power_get() and might power down the corresponding hardware
1775  * block right away if this is the last reference.
1776  */
1777 void intel_display_power_put(struct drm_i915_private *dev_priv,
1778                              enum intel_display_power_domain domain)
1779 {
1780         struct i915_power_domains *power_domains;
1781         struct i915_power_well *power_well;
1782         int i;
1783
1784         power_domains = &dev_priv->power_domains;
1785
1786         mutex_lock(&power_domains->lock);
1787
1788         WARN(!power_domains->domain_use_count[domain],
1789              "Use count on domain %s is already zero\n",
1790              intel_display_power_domain_str(domain));
1791         power_domains->domain_use_count[domain]--;
1792
1793         for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
1794                 intel_power_well_put(dev_priv, power_well);
1795
1796         mutex_unlock(&power_domains->lock);
1797
1798         intel_runtime_pm_put(dev_priv);
1799 }
1800
1801 #define HSW_DISPLAY_POWER_DOMAINS (                     \
1802         BIT(POWER_DOMAIN_PIPE_B) |                      \
1803         BIT(POWER_DOMAIN_PIPE_C) |                      \
1804         BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |         \
1805         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
1806         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
1807         BIT(POWER_DOMAIN_TRANSCODER_A) |                \
1808         BIT(POWER_DOMAIN_TRANSCODER_B) |                \
1809         BIT(POWER_DOMAIN_TRANSCODER_C) |                \
1810         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
1811         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
1812         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
1813         BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */        \
1814         BIT(POWER_DOMAIN_VGA) |                         \
1815         BIT(POWER_DOMAIN_AUDIO) |                       \
1816         BIT(POWER_DOMAIN_INIT))
1817
1818 #define BDW_DISPLAY_POWER_DOMAINS (                     \
1819         BIT(POWER_DOMAIN_PIPE_B) |                      \
1820         BIT(POWER_DOMAIN_PIPE_C) |                      \
1821         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
1822         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
1823         BIT(POWER_DOMAIN_TRANSCODER_A) |                \
1824         BIT(POWER_DOMAIN_TRANSCODER_B) |                \
1825         BIT(POWER_DOMAIN_TRANSCODER_C) |                \
1826         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
1827         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
1828         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
1829         BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */        \
1830         BIT(POWER_DOMAIN_VGA) |                         \
1831         BIT(POWER_DOMAIN_AUDIO) |                       \
1832         BIT(POWER_DOMAIN_INIT))
1833
1834 #define VLV_DISPLAY_POWER_DOMAINS (             \
1835         BIT(POWER_DOMAIN_PIPE_A) |              \
1836         BIT(POWER_DOMAIN_PIPE_B) |              \
1837         BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1838         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1839         BIT(POWER_DOMAIN_TRANSCODER_A) |        \
1840         BIT(POWER_DOMAIN_TRANSCODER_B) |        \
1841         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1842         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1843         BIT(POWER_DOMAIN_PORT_DSI) |            \
1844         BIT(POWER_DOMAIN_PORT_CRT) |            \
1845         BIT(POWER_DOMAIN_VGA) |                 \
1846         BIT(POWER_DOMAIN_AUDIO) |               \
1847         BIT(POWER_DOMAIN_AUX_B) |               \
1848         BIT(POWER_DOMAIN_AUX_C) |               \
1849         BIT(POWER_DOMAIN_GMBUS) |               \
1850         BIT(POWER_DOMAIN_INIT))
1851
1852 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
1853         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1854         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1855         BIT(POWER_DOMAIN_PORT_CRT) |            \
1856         BIT(POWER_DOMAIN_AUX_B) |               \
1857         BIT(POWER_DOMAIN_AUX_C) |               \
1858         BIT(POWER_DOMAIN_INIT))
1859
1860 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
1861         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1862         BIT(POWER_DOMAIN_AUX_B) |               \
1863         BIT(POWER_DOMAIN_INIT))
1864
1865 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
1866         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1867         BIT(POWER_DOMAIN_AUX_B) |               \
1868         BIT(POWER_DOMAIN_INIT))
1869
1870 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
1871         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1872         BIT(POWER_DOMAIN_AUX_C) |               \
1873         BIT(POWER_DOMAIN_INIT))
1874
1875 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
1876         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1877         BIT(POWER_DOMAIN_AUX_C) |               \
1878         BIT(POWER_DOMAIN_INIT))
1879
1880 #define CHV_DISPLAY_POWER_DOMAINS (             \
1881         BIT(POWER_DOMAIN_PIPE_A) |              \
1882         BIT(POWER_DOMAIN_PIPE_B) |              \
1883         BIT(POWER_DOMAIN_PIPE_C) |              \
1884         BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1885         BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1886         BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1887         BIT(POWER_DOMAIN_TRANSCODER_A) |        \
1888         BIT(POWER_DOMAIN_TRANSCODER_B) |        \
1889         BIT(POWER_DOMAIN_TRANSCODER_C) |        \
1890         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1891         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1892         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |    \
1893         BIT(POWER_DOMAIN_PORT_DSI) |            \
1894         BIT(POWER_DOMAIN_VGA) |                 \
1895         BIT(POWER_DOMAIN_AUDIO) |               \
1896         BIT(POWER_DOMAIN_AUX_B) |               \
1897         BIT(POWER_DOMAIN_AUX_C) |               \
1898         BIT(POWER_DOMAIN_AUX_D) |               \
1899         BIT(POWER_DOMAIN_GMBUS) |               \
1900         BIT(POWER_DOMAIN_INIT))
1901
1902 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
1903         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
1904         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
1905         BIT(POWER_DOMAIN_AUX_B) |               \
1906         BIT(POWER_DOMAIN_AUX_C) |               \
1907         BIT(POWER_DOMAIN_INIT))
1908
1909 #define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
1910         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |    \
1911         BIT(POWER_DOMAIN_AUX_D) |               \
1912         BIT(POWER_DOMAIN_INIT))
1913
1914 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1915         .sync_hw = i9xx_always_on_power_well_noop,
1916         .enable = i9xx_always_on_power_well_noop,
1917         .disable = i9xx_always_on_power_well_noop,
1918         .is_enabled = i9xx_always_on_power_well_enabled,
1919 };
1920
1921 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1922         .sync_hw = chv_pipe_power_well_sync_hw,
1923         .enable = chv_pipe_power_well_enable,
1924         .disable = chv_pipe_power_well_disable,
1925         .is_enabled = chv_pipe_power_well_enabled,
1926 };
1927
1928 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1929         .sync_hw = vlv_power_well_sync_hw,
1930         .enable = chv_dpio_cmn_power_well_enable,
1931         .disable = chv_dpio_cmn_power_well_disable,
1932         .is_enabled = vlv_power_well_enabled,
1933 };
1934
1935 static struct i915_power_well i9xx_always_on_power_well[] = {
1936         {
1937                 .name = "always-on",
1938                 .always_on = 1,
1939                 .domains = POWER_DOMAIN_MASK,
1940                 .ops = &i9xx_always_on_power_well_ops,
1941         },
1942 };
1943
1944 static const struct i915_power_well_ops hsw_power_well_ops = {
1945         .sync_hw = hsw_power_well_sync_hw,
1946         .enable = hsw_power_well_enable,
1947         .disable = hsw_power_well_disable,
1948         .is_enabled = hsw_power_well_enabled,
1949 };
1950
1951 static const struct i915_power_well_ops skl_power_well_ops = {
1952         .sync_hw = skl_power_well_sync_hw,
1953         .enable = skl_power_well_enable,
1954         .disable = skl_power_well_disable,
1955         .is_enabled = skl_power_well_enabled,
1956 };
1957
1958 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1959         .sync_hw = gen9_dc_off_power_well_sync_hw,
1960         .enable = gen9_dc_off_power_well_enable,
1961         .disable = gen9_dc_off_power_well_disable,
1962         .is_enabled = gen9_dc_off_power_well_enabled,
1963 };
1964
1965 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1966         .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
1967         .enable = bxt_dpio_cmn_power_well_enable,
1968         .disable = bxt_dpio_cmn_power_well_disable,
1969         .is_enabled = bxt_dpio_cmn_power_well_enabled,
1970 };
1971
1972 static struct i915_power_well hsw_power_wells[] = {
1973         {
1974                 .name = "always-on",
1975                 .always_on = 1,
1976                 .domains = POWER_DOMAIN_MASK,
1977                 .ops = &i9xx_always_on_power_well_ops,
1978         },
1979         {
1980                 .name = "display",
1981                 .domains = HSW_DISPLAY_POWER_DOMAINS,
1982                 .ops = &hsw_power_well_ops,
1983         },
1984 };
1985
1986 static struct i915_power_well bdw_power_wells[] = {
1987         {
1988                 .name = "always-on",
1989                 .always_on = 1,
1990                 .domains = POWER_DOMAIN_MASK,
1991                 .ops = &i9xx_always_on_power_well_ops,
1992         },
1993         {
1994                 .name = "display",
1995                 .domains = BDW_DISPLAY_POWER_DOMAINS,
1996                 .ops = &hsw_power_well_ops,
1997         },
1998 };
1999
2000 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2001         .sync_hw = vlv_power_well_sync_hw,
2002         .enable = vlv_display_power_well_enable,
2003         .disable = vlv_display_power_well_disable,
2004         .is_enabled = vlv_power_well_enabled,
2005 };
2006
2007 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2008         .sync_hw = vlv_power_well_sync_hw,
2009         .enable = vlv_dpio_cmn_power_well_enable,
2010         .disable = vlv_dpio_cmn_power_well_disable,
2011         .is_enabled = vlv_power_well_enabled,
2012 };
2013
2014 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2015         .sync_hw = vlv_power_well_sync_hw,
2016         .enable = vlv_power_well_enable,
2017         .disable = vlv_power_well_disable,
2018         .is_enabled = vlv_power_well_enabled,
2019 };
2020
2021 static struct i915_power_well vlv_power_wells[] = {
2022         {
2023                 .name = "always-on",
2024                 .always_on = 1,
2025                 .domains = POWER_DOMAIN_MASK,
2026                 .ops = &i9xx_always_on_power_well_ops,
2027                 .id = PUNIT_POWER_WELL_ALWAYS_ON,
2028         },
2029         {
2030                 .name = "display",
2031                 .domains = VLV_DISPLAY_POWER_DOMAINS,
2032                 .id = PUNIT_POWER_WELL_DISP2D,
2033                 .ops = &vlv_display_power_well_ops,
2034         },
2035         {
2036                 .name = "dpio-tx-b-01",
2037                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2038                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2039                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2040                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2041                 .ops = &vlv_dpio_power_well_ops,
2042                 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
2043         },
2044         {
2045                 .name = "dpio-tx-b-23",
2046                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2047                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2048                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2049                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2050                 .ops = &vlv_dpio_power_well_ops,
2051                 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2052         },
2053         {
2054                 .name = "dpio-tx-c-01",
2055                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2056                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2057                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2058                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2059                 .ops = &vlv_dpio_power_well_ops,
2060                 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2061         },
2062         {
2063                 .name = "dpio-tx-c-23",
2064                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2065                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2066                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2067                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2068                 .ops = &vlv_dpio_power_well_ops,
2069                 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2070         },
2071         {
2072                 .name = "dpio-common",
2073                 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2074                 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2075                 .ops = &vlv_dpio_cmn_power_well_ops,
2076         },
2077 };
2078
2079 static struct i915_power_well chv_power_wells[] = {
2080         {
2081                 .name = "always-on",
2082                 .always_on = 1,
2083                 .domains = POWER_DOMAIN_MASK,
2084                 .ops = &i9xx_always_on_power_well_ops,
2085         },
2086         {
2087                 .name = "display",
2088                 /*
2089                  * Pipe A power well is the new disp2d well. Pipe B and C
2090                  * power wells don't actually exist. Pipe A power well is
2091                  * required for any pipe to work.
2092                  */
2093                 .domains = CHV_DISPLAY_POWER_DOMAINS,
2094                 .id = PIPE_A,
2095                 .ops = &chv_pipe_power_well_ops,
2096         },
2097         {
2098                 .name = "dpio-common-bc",
2099                 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2100                 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2101                 .ops = &chv_dpio_cmn_power_well_ops,
2102         },
2103         {
2104                 .name = "dpio-common-d",
2105                 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2106                 .id = PUNIT_POWER_WELL_DPIO_CMN_D,
2107                 .ops = &chv_dpio_cmn_power_well_ops,
2108         },
2109 };
2110
2111 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2112                                     int power_well_id)
2113 {
2114         struct i915_power_well *power_well;
2115         bool ret;
2116
2117         power_well = lookup_power_well(dev_priv, power_well_id);
2118         ret = power_well->ops->is_enabled(dev_priv, power_well);
2119
2120         return ret;
2121 }
2122
2123 static struct i915_power_well skl_power_wells[] = {
2124         {
2125                 .name = "always-on",
2126                 .always_on = 1,
2127                 .domains = POWER_DOMAIN_MASK,
2128                 .ops = &i9xx_always_on_power_well_ops,
2129                 .id = SKL_DISP_PW_ALWAYS_ON,
2130         },
2131         {
2132                 .name = "power well 1",
2133                 /* Handled by the DMC firmware */
2134                 .domains = 0,
2135                 .ops = &skl_power_well_ops,
2136                 .id = SKL_DISP_PW_1,
2137         },
2138         {
2139                 .name = "MISC IO power well",
2140                 /* Handled by the DMC firmware */
2141                 .domains = 0,
2142                 .ops = &skl_power_well_ops,
2143                 .id = SKL_DISP_PW_MISC_IO,
2144         },
2145         {
2146                 .name = "DC off",
2147                 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2148                 .ops = &gen9_dc_off_power_well_ops,
2149                 .id = SKL_DISP_PW_DC_OFF,
2150         },
2151         {
2152                 .name = "power well 2",
2153                 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2154                 .ops = &skl_power_well_ops,
2155                 .id = SKL_DISP_PW_2,
2156         },
2157         {
2158                 .name = "DDI A/E power well",
2159                 .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
2160                 .ops = &skl_power_well_ops,
2161                 .id = SKL_DISP_PW_DDI_A_E,
2162         },
2163         {
2164                 .name = "DDI B power well",
2165                 .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
2166                 .ops = &skl_power_well_ops,
2167                 .id = SKL_DISP_PW_DDI_B,
2168         },
2169         {
2170                 .name = "DDI C power well",
2171                 .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
2172                 .ops = &skl_power_well_ops,
2173                 .id = SKL_DISP_PW_DDI_C,
2174         },
2175         {
2176                 .name = "DDI D power well",
2177                 .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
2178                 .ops = &skl_power_well_ops,
2179                 .id = SKL_DISP_PW_DDI_D,
2180         },
2181 };
2182
2183 static struct i915_power_well bxt_power_wells[] = {
2184         {
2185                 .name = "always-on",
2186                 .always_on = 1,
2187                 .domains = POWER_DOMAIN_MASK,
2188                 .ops = &i9xx_always_on_power_well_ops,
2189         },
2190         {
2191                 .name = "power well 1",
2192                 .domains = 0,
2193                 .ops = &skl_power_well_ops,
2194                 .id = SKL_DISP_PW_1,
2195         },
2196         {
2197                 .name = "DC off",
2198                 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2199                 .ops = &gen9_dc_off_power_well_ops,
2200                 .id = SKL_DISP_PW_DC_OFF,
2201         },
2202         {
2203                 .name = "power well 2",
2204                 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2205                 .ops = &skl_power_well_ops,
2206                 .id = SKL_DISP_PW_2,
2207         },
2208         {
2209                 .name = "dpio-common-a",
2210                 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2211                 .ops = &bxt_dpio_cmn_power_well_ops,
2212                 .id = BXT_DPIO_CMN_A,
2213                 .data = DPIO_PHY1,
2214         },
2215         {
2216                 .name = "dpio-common-bc",
2217                 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2218                 .ops = &bxt_dpio_cmn_power_well_ops,
2219                 .id = BXT_DPIO_CMN_BC,
2220                 .data = DPIO_PHY0,
2221         },
2222 };
2223
2224 static struct i915_power_well glk_power_wells[] = {
2225         {
2226                 .name = "always-on",
2227                 .always_on = 1,
2228                 .domains = POWER_DOMAIN_MASK,
2229                 .ops = &i9xx_always_on_power_well_ops,
2230         },
2231         {
2232                 .name = "power well 1",
2233                 /* Handled by the DMC firmware */
2234                 .domains = 0,
2235                 .ops = &skl_power_well_ops,
2236                 .id = SKL_DISP_PW_1,
2237         },
2238         {
2239                 .name = "DC off",
2240                 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2241                 .ops = &gen9_dc_off_power_well_ops,
2242                 .id = SKL_DISP_PW_DC_OFF,
2243         },
2244         {
2245                 .name = "power well 2",
2246                 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2247                 .ops = &skl_power_well_ops,
2248                 .id = SKL_DISP_PW_2,
2249         },
2250         {
2251                 .name = "dpio-common-a",
2252                 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2253                 .ops = &bxt_dpio_cmn_power_well_ops,
2254                 .id = BXT_DPIO_CMN_A,
2255                 .data = DPIO_PHY1,
2256         },
2257         {
2258                 .name = "dpio-common-b",
2259                 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2260                 .ops = &bxt_dpio_cmn_power_well_ops,
2261                 .id = BXT_DPIO_CMN_BC,
2262                 .data = DPIO_PHY0,
2263         },
2264         {
2265                 .name = "dpio-common-c",
2266                 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2267                 .ops = &bxt_dpio_cmn_power_well_ops,
2268                 .id = GLK_DPIO_CMN_C,
2269                 .data = DPIO_PHY2,
2270         },
2271         {
2272                 .name = "AUX A",
2273                 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2274                 .ops = &skl_power_well_ops,
2275                 .id = GLK_DISP_PW_AUX_A,
2276         },
2277         {
2278                 .name = "AUX B",
2279                 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2280                 .ops = &skl_power_well_ops,
2281                 .id = GLK_DISP_PW_AUX_B,
2282         },
2283         {
2284                 .name = "AUX C",
2285                 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2286                 .ops = &skl_power_well_ops,
2287                 .id = GLK_DISP_PW_AUX_C,
2288         },
2289         {
2290                 .name = "DDI A power well",
2291                 .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
2292                 .ops = &skl_power_well_ops,
2293                 .id = GLK_DISP_PW_DDI_A,
2294         },
2295         {
2296                 .name = "DDI B power well",
2297                 .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
2298                 .ops = &skl_power_well_ops,
2299                 .id = SKL_DISP_PW_DDI_B,
2300         },
2301         {
2302                 .name = "DDI C power well",
2303                 .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
2304                 .ops = &skl_power_well_ops,
2305                 .id = SKL_DISP_PW_DDI_C,
2306         },
2307 };
2308
2309 static int
2310 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2311                                    int disable_power_well)
2312 {
2313         if (disable_power_well >= 0)
2314                 return !!disable_power_well;
2315
2316         return 1;
2317 }
2318
2319 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2320                                     int enable_dc)
2321 {
2322         uint32_t mask;
2323         int requested_dc;
2324         int max_dc;
2325
2326         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2327                 max_dc = 2;
2328                 mask = 0;
2329         } else if (IS_GEN9_LP(dev_priv)) {
2330                 max_dc = 1;
2331                 /*
2332                  * DC9 has a separate HW flow from the rest of the DC states,
2333                  * not depending on the DMC firmware. It's needed by system
2334                  * suspend/resume, so allow it unconditionally.
2335                  */
2336                 mask = DC_STATE_EN_DC9;
2337         } else {
2338                 max_dc = 0;
2339                 mask = 0;
2340         }
2341
2342         if (!i915.disable_power_well)
2343                 max_dc = 0;
2344
2345         if (enable_dc >= 0 && enable_dc <= max_dc) {
2346                 requested_dc = enable_dc;
2347         } else if (enable_dc == -1) {
2348                 requested_dc = max_dc;
2349         } else if (enable_dc > max_dc && enable_dc <= 2) {
2350                 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2351                               enable_dc, max_dc);
2352                 requested_dc = max_dc;
2353         } else {
2354                 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2355                 requested_dc = max_dc;
2356         }
2357
2358         if (requested_dc > 1)
2359                 mask |= DC_STATE_EN_UPTO_DC6;
2360         if (requested_dc > 0)
2361                 mask |= DC_STATE_EN_UPTO_DC5;
2362
2363         DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2364
2365         return mask;
2366 }
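
/*
 * A worked example tracing the function above (a sketch, not exhaustive):
 * on SKL/KBL with i915.disable_power_well=1 and i915.enable_dc=-1 the code
 * picks max_dc = 2, requested_dc = 2 and returns
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On a GEN9 LP platform the
 * same parameters yield max_dc = 1 and the returned mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5.
 */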
2367
2368 #define set_power_wells(power_domains, __power_wells) ({                \
2369         (power_domains)->power_wells = (__power_wells);                 \
2370         (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
2371 })
2372
2373 /**
2374  * intel_power_domains_init - initializes the power domain structures
2375  * @dev_priv: i915 device instance
2376  *
2377  * Initializes the power domain structures for @dev_priv depending upon the
2378  * supported platform.
2379  */
2380 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2381 {
2382         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2383
2384         i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2385                                                      i915.disable_power_well);
2386         dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2387                                                             i915.enable_dc);
2388
2389         BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
2390
2391         mutex_init(&power_domains->lock);
2392
2393         /*
2394          * The enabling order will be from lower to higher indexed wells,
2395          * the disabling order is reversed.
2396          */
2397         if (IS_HASWELL(dev_priv)) {
2398                 set_power_wells(power_domains, hsw_power_wells);
2399         } else if (IS_BROADWELL(dev_priv)) {
2400                 set_power_wells(power_domains, bdw_power_wells);
2401         } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2402                 set_power_wells(power_domains, skl_power_wells);
2403         } else if (IS_BROXTON(dev_priv)) {
2404                 set_power_wells(power_domains, bxt_power_wells);
2405         } else if (IS_GEMINILAKE(dev_priv)) {
2406                 set_power_wells(power_domains, glk_power_wells);
2407         } else if (IS_CHERRYVIEW(dev_priv)) {
2408                 set_power_wells(power_domains, chv_power_wells);
2409         } else if (IS_VALLEYVIEW(dev_priv)) {
2410                 set_power_wells(power_domains, vlv_power_wells);
2411         } else {
2412                 set_power_wells(power_domains, i9xx_always_on_power_well);
2413         }
2414
2415         return 0;
2416 }
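
/*
 * Rough lifecycle of these entry points, pieced together from the kernel-doc
 * in this file (a sketch of the intended ordering, not a verbatim copy of
 * the i915 load path):
 *
 *	intel_power_domains_init(dev_priv);
 *	intel_power_domains_init_hw(dev_priv, false);
 *	...
 *	intel_power_domains_suspend(dev_priv);		(system suspend)
 *	intel_power_domains_init_hw(dev_priv, true);	(system resume)
 *	...
 *	intel_power_domains_fini(dev_priv);		(driver unload)
 */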
2417
2418 /**
2419  * intel_power_domains_fini - finalizes the power domain structures
2420  * @dev_priv: i915 device instance
2421  *
2422  * Finalizes the power domain structures for @dev_priv depending upon the
2423  * supported platform. This function also disables runtime pm and ensures that
2424  * the device stays powered up so that the driver can be reloaded.
2425  */
2426 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2427 {
2428         struct device *kdev = &dev_priv->drm.pdev->dev;
2429
2430         /*
2431          * The i915.ko module is still not prepared to be loaded when
2432          * the power well is not enabled, so just enable it in case
2433          * we're going to unload/reload.
2434          * The following also reacquires the RPM reference the core passed
2435          * to the driver during loading, which is dropped in
2436          * intel_runtime_pm_enable(). We have to hand back the control of the
2437          * device to the core with this reference held.
2438          */
2439         intel_display_set_init_power(dev_priv, true);
2440
2441         /* Remove the refcount we took to keep power well support disabled. */
2442         if (!i915.disable_power_well)
2443                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2444
2445         /*
2446          * Remove the refcount we took in intel_runtime_pm_enable() in case
2447          * the platform doesn't support runtime PM.
2448          */
2449         if (!HAS_RUNTIME_PM(dev_priv))
2450                 pm_runtime_put(kdev);
2451 }
2452
2453 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2454 {
2455         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2456         struct i915_power_well *power_well;
2457         int i;
2458
2459         mutex_lock(&power_domains->lock);
2460         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
2461                 power_well->ops->sync_hw(dev_priv, power_well);
2462                 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2463                                                                      power_well);
2464         }
2465         mutex_unlock(&power_domains->lock);
2466 }
2467
2468 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2469 {
2470         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2471         POSTING_READ(DBUF_CTL);
2472
2473         udelay(10);
2474
2475         if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2476                 DRM_ERROR("DBuf power enable timeout\n");
2477 }
2478
2479 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2480 {
2481         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2482         POSTING_READ(DBUF_CTL);
2483
2484         udelay(10);
2485
2486         if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2487                 DRM_ERROR("DBuf power disable timeout!\n");
2488 }
2489
2490 static void skl_display_core_init(struct drm_i915_private *dev_priv,
2491                                    bool resume)
2492 {
2493         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2494         struct i915_power_well *well;
2495         uint32_t val;
2496
2497         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2498
2499         /* enable PCH reset handshake */
2500         val = I915_READ(HSW_NDE_RSTWRN_OPT);
2501         I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2502
2503         /* enable PG1 and Misc I/O */
2504         mutex_lock(&power_domains->lock);
2505
2506         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2507         intel_power_well_enable(dev_priv, well);
2508
2509         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2510         intel_power_well_enable(dev_priv, well);
2511
2512         mutex_unlock(&power_domains->lock);
2513
2514         skl_init_cdclk(dev_priv);
2515
2516         gen9_dbuf_enable(dev_priv);
2517
2518         if (resume && dev_priv->csr.dmc_payload)
2519                 intel_csr_load_program(dev_priv);
2520 }
2521
2522 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2523 {
2524         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2525         struct i915_power_well *well;
2526
2527         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2528
2529         gen9_dbuf_disable(dev_priv);
2530
2531         skl_uninit_cdclk(dev_priv);
2532
2533         /* The spec doesn't call for removing the reset handshake flag */
2534         /* disable PG1 and Misc I/O */
2535
2536         mutex_lock(&power_domains->lock);
2537
2538         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2539         intel_power_well_disable(dev_priv, well);
2540
2541         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2542         intel_power_well_disable(dev_priv, well);
2543
2544         mutex_unlock(&power_domains->lock);
2545 }
2546
2547 void bxt_display_core_init(struct drm_i915_private *dev_priv,
2548                            bool resume)
2549 {
2550         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2551         struct i915_power_well *well;
2552         uint32_t val;
2553
2554         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2555
2556         /*
2557          * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2558          * or else the reset will hang because there is no PCH to respond.
2559          * Move the handshake programming to initialization sequence.
2560          * Previously was left up to BIOS.
2561          */
2562         val = I915_READ(HSW_NDE_RSTWRN_OPT);
2563         val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2564         I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2565
2566         /* Enable PG1 */
2567         mutex_lock(&power_domains->lock);
2568
2569         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2570         intel_power_well_enable(dev_priv, well);
2571
2572         mutex_unlock(&power_domains->lock);
2573
2574         bxt_init_cdclk(dev_priv);
2575
2576         gen9_dbuf_enable(dev_priv);
2577
2578         if (resume && dev_priv->csr.dmc_payload)
2579                 intel_csr_load_program(dev_priv);
2580 }
2581
2582 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2583 {
2584         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2585         struct i915_power_well *well;
2586
2587         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2588
2589         gen9_dbuf_disable(dev_priv);
2590
2591         bxt_uninit_cdclk(dev_priv);
2592
2593         /* The spec doesn't call for removing the reset handshake flag */
2594
2595         /* Disable PG1 */
2596         mutex_lock(&power_domains->lock);
2597
2598         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2599         intel_power_well_disable(dev_priv, well);
2600
2601         mutex_unlock(&power_domains->lock);
2602 }
2603
2604 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2605 {
2606         struct i915_power_well *cmn_bc =
2607                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2608         struct i915_power_well *cmn_d =
2609                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2610
2611         /*
2612          * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2613          * workaround never ever read DISPLAY_PHY_CONTROL, and
2614          * instead maintain a shadow copy ourselves. Use the actual
2615          * power well state and lane status to reconstruct the
2616          * expected initial value.
2617          */
2618         dev_priv->chv_phy_control =
2619                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2620                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2621                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2622                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2623                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2624
2625         /*
2626          * If all lanes are disabled we leave the override disabled
2627          * with all power down bits cleared to match the state we
2628          * would use after disabling the port. Otherwise enable the
2629          * override and set the lane powerdown bits according to the
2630          * current lane status.
2631          */
2632         if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2633                 uint32_t status = I915_READ(DPLL(PIPE_A));
2634                 unsigned int mask;
2635
2636                 mask = status & DPLL_PORTB_READY_MASK;
2637                 if (mask == 0xf)
2638                         mask = 0x0;
2639                 else
2640                         dev_priv->chv_phy_control |=
2641                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2642
2643                 dev_priv->chv_phy_control |=
2644                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2645
2646                 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2647                 if (mask == 0xf)
2648                         mask = 0x0;
2649                 else
2650                         dev_priv->chv_phy_control |=
2651                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2652
2653                 dev_priv->chv_phy_control |=
2654                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2655
2656                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2657
2658                 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2659         } else {
2660                 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2661         }
2662
2663         if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2664                 uint32_t status = I915_READ(DPIO_PHY_STATUS);
2665                 unsigned int mask;
2666
2667                 mask = status & DPLL_PORTD_READY_MASK;
2668
2669                 if (mask == 0xf)
2670                         mask = 0x0;
2671                 else
2672                         dev_priv->chv_phy_control |=
2673                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2674
2675                 dev_priv->chv_phy_control |=
2676                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2677
2678                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2679
2680                 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2681         } else {
2682                 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2683         }
2684
2685         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2686
2687         DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2688                       dev_priv->chv_phy_control);
2689 }
2690
2691 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2692 {
2693         struct i915_power_well *cmn =
2694                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2695         struct i915_power_well *disp2d =
2696                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2697
2698         /* If the display might already be active, skip this */
2699         if (cmn->ops->is_enabled(dev_priv, cmn) &&
2700             disp2d->ops->is_enabled(dev_priv, disp2d) &&
2701             I915_READ(DPIO_CTL) & DPIO_CMNRST)
2702                 return;
2703
2704         DRM_DEBUG_KMS("toggling display PHY side reset\n");
2705
2706         /* cmnlane needs DPLL registers */
2707         disp2d->ops->enable(dev_priv, disp2d);
2708
2709         /*
2710          * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2711          * Need to assert and de-assert PHY SB reset by gating the
2712          * common lane power, then un-gating it.
2713          * Simply un-gating isn't enough to reset the PHY sufficiently to get
2714          * ports and lanes running.
2715          */
2716         cmn->ops->disable(dev_priv, cmn);
2717 }
2718
2719 /**
2720  * intel_power_domains_init_hw - initialize hardware power domain state
2721  * @dev_priv: i915 device instance
2722  * @resume: true if called from a resume code path, false otherwise
2723  *
2724  * This function initializes the hardware power domain state and enables all
2725  * power domains using intel_display_set_init_power().
2726  */
2727 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2728 {
2729         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2730
2731         power_domains->initializing = true;
2732
2733         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2734                 skl_display_core_init(dev_priv, resume);
2735         } else if (IS_GEN9_LP(dev_priv)) {
2736                 bxt_display_core_init(dev_priv, resume);
2737         } else if (IS_CHERRYVIEW(dev_priv)) {
2738                 mutex_lock(&power_domains->lock);
2739                 chv_phy_control_init(dev_priv);
2740                 mutex_unlock(&power_domains->lock);
2741         } else if (IS_VALLEYVIEW(dev_priv)) {
2742                 mutex_lock(&power_domains->lock);
2743                 vlv_cmnlane_wa(dev_priv);
2744                 mutex_unlock(&power_domains->lock);
2745         }
2746
2747         /* For now, we need the power well to be always enabled. */
2748         intel_display_set_init_power(dev_priv, true);
2749         /* Keep the power wells always on if the user disabled power well support. */
2750         if (!i915.disable_power_well)
2751                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2752         intel_power_domains_sync_hw(dev_priv);
2753         power_domains->initializing = false;
2754 }
2755
2756 /**
2757  * intel_power_domains_suspend - suspend power domain state
2758  * @dev_priv: i915 device instance
2759  *
2760  * This function prepares the hardware power domain state before entering
2761  * system suspend. It must be paired with intel_power_domains_init_hw().
2762  */
2763 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2764 {
2765         /*
2766          * Even if power well support was disabled we still want to disable
2767          * power wells while we are system suspended.
2768          */
2769         if (!i915.disable_power_well)
2770                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2771
2772         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2773                 skl_display_core_uninit(dev_priv);
2774         else if (IS_GEN9_LP(dev_priv))
2775                 bxt_display_core_uninit(dev_priv);
2776 }
2777
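/*
 * Illustrative sketch only, not called anywhere in this file: the expected
 * pairing of the two entry points above across a system sleep transition.
 * The actual suspend/resume handlers live in the core driver code.
 */
#if 0
static void example_system_sleep(struct drm_i915_private *dev_priv)
{
	/* entering suspend: tear down power domain / display core state */
	intel_power_domains_suspend(dev_priv);

	/* ... system sleeps ... */

	/* resuming: re-initialize the hardware power domain state */
	intel_power_domains_init_hw(dev_priv, true);
}
#endif
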
2778 /**
2779  * intel_runtime_pm_get - grab a runtime pm reference
2780  * @dev_priv: i915 device instance
2781  *
2782  * This function grabs a device-level runtime pm reference (mostly used for GEM
2783  * code to ensure the GTT or GT is on) and ensures that it is powered up.
2784  *
2785  * Any runtime pm reference obtained by this function must have a symmetric
2786  * call to intel_runtime_pm_put() to release the reference again.
2787  */
2788 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2789 {
2790         struct pci_dev *pdev = dev_priv->drm.pdev;
2791         struct device *kdev = &pdev->dev;
2792
2793         pm_runtime_get_sync(kdev);
2794
2795         atomic_inc(&dev_priv->pm.wakeref_count);
2796         assert_rpm_wakelock_held(dev_priv);
2797 }
2798
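/*
 * Illustrative sketch only (example_read_with_wakeref is not part of the
 * driver): a caller brackets hardware access with a get/put pair as
 * described above; the register read in the body is just a placeholder.
 */
#if 0
static u32 example_read_with_wakeref(struct drm_i915_private *dev_priv,
				     i915_reg_t reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);		/* device is powered up from here */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);		/* may let the device suspend again */

	return val;
}
#endif
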
2799 /**
2800  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2801  * @dev_priv: i915 device instance
2802  *
2803  * This function grabs a device-level runtime pm reference if the device is
2804  * already in use and ensures that it is powered up.
2805  *
2806  * Any runtime pm reference obtained by this function must have a symmetric
2807  * call to intel_runtime_pm_put() to release the reference again.
2808  */
2809 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2810 {
2811         struct pci_dev *pdev = dev_priv->drm.pdev;
2812         struct device *kdev = &pdev->dev;
2813
2814         if (IS_ENABLED(CONFIG_PM)) {
2815                 int ret = pm_runtime_get_if_in_use(kdev);
2816
2817                 /*
2818                  * In cases where runtime PM is disabled by the RPM core and we
2819                  * get an -EINVAL return value, we are not supposed to call this
2820                  * function, since the power state is undefined. Currently this
2821                  * applies to the late/early system suspend/resume handlers.
2822                  */
2823                 WARN_ON_ONCE(ret < 0);
2824                 if (ret <= 0)
2825                         return false;
2826         }
2827
2828         atomic_inc(&dev_priv->pm.wakeref_count);
2829         assert_rpm_wakelock_held(dev_priv);
2830
2831         return true;
2832 }
2833
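/*
 * Illustrative sketch only (example_opportunistic_work is not part of the
 * driver): the conditional variant is meant for work that should only be
 * done while the device is already awake.
 */
#if 0
static void example_opportunistic_work(struct drm_i915_private *dev_priv)
{
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;	/* device suspended (or suspending), nothing to do */

	/* ... touch hardware only while it is known to be awake ... */

	intel_runtime_pm_put(dev_priv);
}
#endif
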
2834 /**
2835  * intel_runtime_pm_get_noresume - grab a runtime pm reference
2836  * @dev_priv: i915 device instance
2837  *
2838  * This function grabs a device-level runtime pm reference (mostly used for GEM
2839  * code to ensure the GTT or GT is on).
2840  *
2841  * It will _not_ power up the device but instead only check that it's powered
2842  * on.  Therefore it is only valid to call this function from contexts where
2843  * the device is known to be powered up and where trying to power it up would
2844  * result in hilarity and deadlocks. That pretty much means only the system
2845  * suspend/resume code where this is used to grab runtime pm references for
2846  * delayed setup down in work items.
2847  *
2848  * Any runtime pm reference obtained by this function must have a symmetric
2849  * call to intel_runtime_pm_put() to release the reference again.
2850  */
2851 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2852 {
2853         struct pci_dev *pdev = dev_priv->drm.pdev;
2854         struct device *kdev = &pdev->dev;
2855
2856         assert_rpm_wakelock_held(dev_priv);
2857         pm_runtime_get_noresume(kdev);
2858
2859         atomic_inc(&dev_priv->pm.wakeref_count);
2860 }
2861
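/*
 * Illustrative sketch only, with a hypothetical work item: from a context
 * that already holds a wakeref, take an extra reference for deferred setup
 * and release it from the work handler once it has run.
 */
#if 0
static void example_defer_setup(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get_noresume(dev_priv);
	/* example_setup_work is hypothetical; its handler must end with
	 * intel_runtime_pm_put(dev_priv) to balance the reference.
	 */
	queue_work(dev_priv->wq, &dev_priv->example_setup_work);
}
#endif
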
2862 /**
2863  * intel_runtime_pm_put - release a runtime pm reference
2864  * @dev_priv: i915 device instance
2865  *
2866  * This function drops the device-level runtime pm reference obtained by
2867  * intel_runtime_pm_get() and might power down the corresponding
2868  * hardware block right away if this is the last reference.
2869  */
2870 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2871 {
2872         struct pci_dev *pdev = dev_priv->drm.pdev;
2873         struct device *kdev = &pdev->dev;
2874
2875         assert_rpm_wakelock_held(dev_priv);
2876         atomic_dec(&dev_priv->pm.wakeref_count);
2877
2878         pm_runtime_mark_last_busy(kdev);
2879         pm_runtime_put_autosuspend(kdev);
2880 }
2881
2882 /**
2883  * intel_runtime_pm_enable - enable runtime pm
2884  * @dev_priv: i915 device instance
2885  *
2886  * This function enables runtime pm at the end of the driver load sequence.
2887  *
2888  * Note that this function does not currently enable runtime pm for the
2889  * subordinate display power domains. That is only done on the first modeset
2890  * using intel_display_set_init_power().
2891  */
2892 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2893 {
2894         struct pci_dev *pdev = dev_priv->drm.pdev;
2895         struct device *kdev = &pdev->dev;
2896
2897         pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
2898         pm_runtime_mark_last_busy(kdev);
2899
2900         /*
2901          * Take a permanent reference to disable the RPM functionality and drop
2902          * it only when unloading the driver. Use the low level get/put helpers,
2903          * so the driver's own RPM reference tracking asserts also work on
2904          * platforms without RPM support.
2905          */
2906         if (!HAS_RUNTIME_PM(dev_priv)) {
2907                 pm_runtime_dont_use_autosuspend(kdev);
2908                 pm_runtime_get_sync(kdev);
2909         } else {
2910                 pm_runtime_use_autosuspend(kdev);
2911         }
2912
2913         /*
2914          * The core calls the driver load handler with an RPM reference held.
2915          * We drop that here and will reacquire it during unloading in
2916          * intel_power_domains_fini().
2917          */
2918         pm_runtime_put_autosuspend(kdev);
2919 }