/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
28 struct drm_plane *planes[8];
32 /* which mixer/encoder we route output to: */
35 /* if there is a pending flip, these will be non-null: */
36 struct drm_pending_vblank_event *event;
37 struct msm_fence_cb pageflip_cb;
39 #define PENDING_CURSOR 0x1
40 #define PENDING_FLIP 0x2
43 /* the fb that we logically (from PoV of KMS API) hold a ref
44 * to. Which we may not yet be scanning out (we may still
45 * be scanning out previous in case of page_flip while waiting
46 * for gpu rendering to complete:
48 struct drm_framebuffer *fb;
50 /* the fb that we currently hold a scanout ref to: */
51 struct drm_framebuffer *scanout_fb;
53 /* for unref'ing framebuffers after scanout completes: */
54 struct drm_flip_work unref_fb_work;
56 struct mdp_irq vblank;
59 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
61 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
63 struct msm_drm_private *priv = crtc->dev->dev_private;
64 return to_mdp5_kms(to_mdp_kms(priv->kms));
67 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
69 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
71 atomic_or(pending, &mdp5_crtc->pending);
72 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
75 static void crtc_flush(struct drm_crtc *crtc)
77 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
78 struct mdp5_kms *mdp5_kms = get_kms(crtc);
79 int id = mdp5_crtc->id;
80 uint32_t i, flush = 0;
82 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
83 struct drm_plane *plane = mdp5_crtc->planes[i];
85 enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
86 flush |= pipe2flush(pipe);
89 flush |= mixer2flush(mdp5_crtc->id);
90 flush |= MDP5_CTL_FLUSH_CTL;
92 DBG("%s: flush=%08x", mdp5_crtc->name, flush);
94 mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
97 static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
99 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
100 struct drm_framebuffer *old_fb = mdp5_crtc->fb;
102 /* grab reference to incoming scanout fb: */
103 drm_framebuffer_reference(new_fb);
104 mdp5_crtc->base.primary->fb = new_fb;
105 mdp5_crtc->fb = new_fb;
108 drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
111 /* unlike update_fb(), take a ref to the new scanout fb *before* updating
112 * plane, then call this. Needed to ensure we don't unref the buffer that
113 * is actually still being scanned out.
115 * Note that this whole thing goes away with atomic.. since we can defer
116 * calling into driver until rendering is done.
118 static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
120 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
122 /* flush updates, to make sure hw is updated to new scanout fb,
123 * so that we can safely queue unref to current fb (ie. next
124 * vblank we know hw is done w/ previous scanout_fb).
128 if (mdp5_crtc->scanout_fb)
129 drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
130 mdp5_crtc->scanout_fb);
132 mdp5_crtc->scanout_fb = fb;
134 /* enable vblank to complete flip: */
135 request_pending(crtc, PENDING_FLIP);
138 /* if file!=NULL, this is preclose potential cancel-flip path */
139 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
141 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
142 struct drm_device *dev = crtc->dev;
143 struct drm_pending_vblank_event *event;
144 unsigned long flags, i;
146 spin_lock_irqsave(&dev->event_lock, flags);
147 event = mdp5_crtc->event;
149 /* if regular vblank case (!file) or if cancel-flip from
150 * preclose on file that requested flip, then send the
153 if (!file || (event->base.file_priv == file)) {
154 mdp5_crtc->event = NULL;
155 drm_send_vblank_event(dev, mdp5_crtc->id, event);
158 spin_unlock_irqrestore(&dev->event_lock, flags);
160 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
161 struct drm_plane *plane = mdp5_crtc->planes[i];
163 mdp5_plane_complete_flip(plane);
167 static void pageflip_cb(struct msm_fence_cb *cb)
169 struct mdp5_crtc *mdp5_crtc =
170 container_of(cb, struct mdp5_crtc, pageflip_cb);
171 struct drm_crtc *crtc = &mdp5_crtc->base;
172 struct drm_framebuffer *fb = mdp5_crtc->fb;
177 drm_framebuffer_reference(fb);
178 mdp5_plane_set_scanout(crtc->primary, fb);
179 update_scanout(crtc, fb);
182 static void unref_fb_worker(struct drm_flip_work *work, void *val)
184 struct mdp5_crtc *mdp5_crtc =
185 container_of(work, struct mdp5_crtc, unref_fb_work);
186 struct drm_device *dev = mdp5_crtc->base.dev;
188 mutex_lock(&dev->mode_config.mutex);
189 drm_framebuffer_unreference(val);
190 mutex_unlock(&dev->mode_config.mutex);
193 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197 drm_crtc_cleanup(crtc);
198 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
203 static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
205 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
206 struct mdp5_kms *mdp5_kms = get_kms(crtc);
207 bool enabled = (mode == DRM_MODE_DPMS_ON);
209 DBG("%s: mode=%d", mdp5_crtc->name, mode);
211 if (enabled != mdp5_crtc->enabled) {
213 mdp5_enable(mdp5_kms);
214 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
216 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
217 mdp5_disable(mdp5_kms);
219 mdp5_crtc->enabled = enabled;
223 static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
224 const struct drm_display_mode *mode,
225 struct drm_display_mode *adjusted_mode)
230 static void blend_setup(struct drm_crtc *crtc)
232 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
233 struct mdp5_kms *mdp5_kms = get_kms(crtc);
234 int id = mdp5_crtc->id;
237 * Hard-coded setup for now until I figure out how the
242 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
243 MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
244 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
245 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
246 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
247 MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
248 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
249 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
251 /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
252 * we want to be setting CTL[m].LAYER[n]. Not sure what the
253 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
254 * used when chaining up mixers for high resolution displays?
258 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
259 MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
260 MDP5_CTL_LAYER_REG_BORDER_COLOR);
261 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
262 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
263 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
264 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
267 static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
268 struct drm_display_mode *mode,
269 struct drm_display_mode *adjusted_mode,
271 struct drm_framebuffer *old_fb)
273 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
274 struct mdp5_kms *mdp5_kms = get_kms(crtc);
277 mode = adjusted_mode;
279 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
280 mdp5_crtc->name, mode->base.id, mode->name,
281 mode->vrefresh, mode->clock,
282 mode->hdisplay, mode->hsync_start,
283 mode->hsync_end, mode->htotal,
284 mode->vdisplay, mode->vsync_start,
285 mode->vsync_end, mode->vtotal,
286 mode->type, mode->flags);
288 /* grab extra ref for update_scanout() */
289 drm_framebuffer_reference(crtc->primary->fb);
291 ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
292 0, 0, mode->hdisplay, mode->vdisplay,
294 mode->hdisplay << 16, mode->vdisplay << 16);
296 drm_framebuffer_unreference(crtc->primary->fb);
297 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
298 mdp5_crtc->name, ret);
302 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
303 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
304 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
306 update_fb(crtc, crtc->primary->fb);
307 update_scanout(crtc, crtc->primary->fb);
312 static void mdp5_crtc_prepare(struct drm_crtc *crtc)
314 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
315 DBG("%s", mdp5_crtc->name);
316 /* make sure we hold a ref to mdp clks while setting up mode: */
317 mdp5_enable(get_kms(crtc));
318 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
321 static void mdp5_crtc_commit(struct drm_crtc *crtc)
323 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
325 /* drop the ref to mdp clk's that we got in prepare: */
326 mdp5_disable(get_kms(crtc));
329 static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
330 struct drm_framebuffer *old_fb)
332 struct drm_plane *plane = crtc->primary;
333 struct drm_display_mode *mode = &crtc->mode;
336 /* grab extra ref for update_scanout() */
337 drm_framebuffer_reference(crtc->primary->fb);
339 ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
340 0, 0, mode->hdisplay, mode->vdisplay,
342 mode->hdisplay << 16, mode->vdisplay << 16);
344 drm_framebuffer_unreference(crtc->primary->fb);
348 update_fb(crtc, crtc->primary->fb);
349 update_scanout(crtc, crtc->primary->fb);
/* drm_crtc_helper_funcs::load_lut — intentionally empty; gamma/LUT is not
 * implemented for mdp5, but the helper vtable requires a callback.
 */
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
358 static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
359 struct drm_framebuffer *new_fb,
360 struct drm_pending_vblank_event *event,
361 uint32_t page_flip_flags)
363 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
364 struct drm_device *dev = crtc->dev;
365 struct drm_gem_object *obj;
368 if (mdp5_crtc->event) {
369 dev_err(dev->dev, "already pending flip!\n");
373 obj = msm_framebuffer_bo(new_fb, 0);
375 spin_lock_irqsave(&dev->event_lock, flags);
376 mdp5_crtc->event = event;
377 spin_unlock_irqrestore(&dev->event_lock, flags);
379 update_fb(crtc, new_fb);
381 return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
384 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
385 struct drm_property *property, uint64_t val)
391 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
392 .set_config = drm_crtc_helper_set_config,
393 .destroy = mdp5_crtc_destroy,
394 .page_flip = mdp5_crtc_page_flip,
395 .set_property = mdp5_crtc_set_property,
398 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
399 .dpms = mdp5_crtc_dpms,
400 .mode_fixup = mdp5_crtc_mode_fixup,
401 .mode_set = mdp5_crtc_mode_set,
402 .prepare = mdp5_crtc_prepare,
403 .commit = mdp5_crtc_commit,
404 .mode_set_base = mdp5_crtc_mode_set_base,
405 .load_lut = mdp5_crtc_load_lut,
408 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
410 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
411 struct drm_crtc *crtc = &mdp5_crtc->base;
412 struct msm_drm_private *priv = crtc->dev->dev_private;
415 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
417 pending = atomic_xchg(&mdp5_crtc->pending, 0);
419 if (pending & PENDING_FLIP) {
420 complete_flip(crtc, NULL);
421 drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
425 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
427 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
428 struct drm_crtc *crtc = &mdp5_crtc->base;
429 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
433 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
435 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
436 return mdp5_crtc->vblank.irqmask;
/* Called from preclose: cancel a pending flip whose event belongs to the
 * closing file, so we never signal an event to a dead drm_file.
 */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
445 /* set interface for routing crtc->encoder: */
446 void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
447 enum mdp5_intf intf_id)
449 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
450 struct mdp5_kms *mdp5_kms = get_kms(crtc);
451 static const enum mdp5_intfnum intfnum[] = {
452 INTF0, INTF1, INTF2, INTF3,
456 /* now that we know what irq's we want: */
457 mdp5_crtc->err.irqmask = intf2err(intf);
458 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
460 /* when called from modeset_init(), skip the rest until later: */
464 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
468 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
469 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
472 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
473 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
476 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
477 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
480 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
481 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
490 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
492 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
493 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
494 MDP5_CTL_OP_MODE(MODE_NONE) |
495 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
500 static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
501 struct drm_plane *plane)
503 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
505 BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
507 if (mdp5_crtc->planes[pipe_id] == plane)
510 mdp5_crtc->planes[pipe_id] = plane;
512 if (mdp5_crtc->enabled && (plane != crtc->primary))
/* Attach a plane to this crtc in the slot matching its hw pipe. */
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), plane);
}
521 void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
523 /* don't actually detatch our primary plane: */
524 if (crtc->primary == plane)
526 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
529 /* initialize crtc */
530 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
531 struct drm_plane *plane, int id)
533 struct drm_crtc *crtc = NULL;
534 struct mdp5_crtc *mdp5_crtc;
536 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
538 return ERR_PTR(-ENOMEM);
540 crtc = &mdp5_crtc->base;
544 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
545 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
547 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
548 pipe2name(mdp5_plane_pipe(plane)), id);
550 drm_flip_work_init(&mdp5_crtc->unref_fb_work,
551 "unref fb", unref_fb_worker);
553 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
555 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
556 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
558 mdp5_plane_install_properties(plane, &crtc->base);