// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};

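/*
 * Coefficients for the backend's output color correction unit, written by
 * sun4i_backend_apply_color_correction(). They presumably encode an RGB to
 * YUV conversion matrix in the hardware's fixed-point register layout; the
 * exact encoding is not documented here.
 */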
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

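/*
 * Request a manual reload of the register buffer: automatic loading is
 * disabled at bind time, so writing LOADCTL here is what makes the hardware
 * pick up the shadowed configuration. sun4i_backend_atomic_begin() waits for
 * the bit to clear again before the next update is written.
 */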
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

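/*
 * Translate a DRM fourcc into the value expected by the layer's
 * ATTCTL_REG1 framebuffer format field. Only the RGB formats the layers
 * handle natively are listed here; packed YUV is configured separately in
 * sun4i_backend_update_yuv_format().
 */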
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};

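/*
 * Check whether the backend itself can scan out a buffer with this
 * format/modifier: only linear buffers using one of the formats listed
 * above qualify. Anything else is a candidate for the frontend instead
 * (see sun4i_backend_plane_uses_frontend()).
 */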
bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
				 state->crtc_w, state->crtc_h);
		regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
						   state->crtc_h));
	}

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}

static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	bool interlaced = false;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	if (plane->state->crtc)
		interlaced = plane->state->crtc->state->adjusted_mode.flags
			& DRM_MODE_FLAG_INTERLACE;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");

	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

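/*
 * When a layer is fed by the frontend, the backend only needs the output
 * format and the layer's "video" input enable bit (VDOEN); sizes and
 * buffer addresses are then handled on the frontend side.
 */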
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}

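/*
 * Apply the pipe and per-pipe priority chosen for this plane in
 * sun4i_backend_atomic_check(), based on its normalized zpos.
 */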
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}

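/*
 * Clear the frontend (VDOEN) and YUV input selection bits so that settings
 * left over from a previous configuration don't affect the next use of this
 * layer.
 */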
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

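/* The source size in the plane state is in 16.16 fixed point, hence the shifts. */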
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}

static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}

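/*
 * Wait for the previous register buffer load (requested in
 * sun4i_backend_commit()) to complete, i.e. for the hardware to clear
 * LOADCTL, before the new state starts being written.
 */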
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;
		struct drm_format_name_buf format_name;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
				 drm_get_format_name(fb->format->format,
						     &format_name));
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than 1 layer is assigned to the same pipe,
	 * and if the pixels overlap, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, that takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe 1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}

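/*
 * The A33 variant has an extra "SAT" block (presumably a saturation /
 * color enhancement unit) with its own clock and reset line, which we
 * bring up along with the backend.
 */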
static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							   struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin			= sun4i_backend_atomic_begin,
	.atomic_check			= sun4i_backend_atomic_check,
	.commit				= sun4i_backend_commit,
	.layers_init			= sun4i_layers_init,
	.apply_color_correction		= sun4i_backend_apply_color_correction,
	.disable_color_correction	= sun4i_backend_disable_color_correction,
	.vblank_quirk			= sun4i_backend_vblank_quirk,
};

static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	struct resource *res;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	} else {
		/*
		 * If we don't have the interconnect property, most likely
		 * because of an old DT, we need to set the DMA offset by hand
		 * on our device since the RAM mapping is at 0 for the DMA bus,
		 * unlike the CPU.
		 *
		 * XXX(hch): this has no business in a driver and needs to move
		 * to the device tree.
		 */
		ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
		if (ret)
			return ret;
	}

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with same ID.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means, we will need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
	.bind	= sun4i_backend_bind,
	.unbind	= sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
	.probe		= sun4i_backend_probe,
	.remove		= sun4i_backend_remove,
	.driver		= {
		.name		= "sun4i-backend",
		.of_match_table	= sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");