// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);
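
	/* Note: drm_atomic_get_private_obj_state() below duplicates the CTM
	 * state into this transaction on first use, so any changes are
	 * discarded along with the rest of the state if the commit fails.
	 * drm_modeset_lock() above may also return -EDEADLK, in which case
	 * the atomic core drops every lock and restarts the commit.
	 */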
	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
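/* Worked example (illustrative): 0.5 in S31.32 is 0x0000000080000000. No
 * bits in [62:32] are set, so the fractional path below applies and
 * 0x80000000 >> 23 = 0x100 keeps bit 8 of the nine fractional bits,
 * i.e. 256/512 = 0.5 in S0.9. A scalar of 2.0 (0x200000000) has integer
 * bits set and saturates to 0x1ff, just under 1.0.
 */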
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}
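
	/* A fifo value of 0 here disables the CTM entirely; the 1-based
	 * fifo numbering assigned in vc4_ctm_atomic_check() relies on that.
	 */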
	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
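			/* For example (illustrative): with the sign bit
			 * cleared, 1.0 in S31.32 is exactly BIT_ULL(32), so
			 * the loop below accepts magnitudes up to and
			 * including 1.0 and rejects anything larger.
			 */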
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
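	/* Illustrative arithmetic: SZ_1G + SZ_512M above is 1.5 GB/s, a 25%
	 * margin under the 2 GB/s bus limit, and 240M cycles leaves a 4%
	 * margin under the 250 MHz HVS clock.
	 */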
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;
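
	/* The load tracker has to run after drm_atomic_helper_check(): the
	 * per-plane ->atomic_check() hooks invoked there compute the
	 * membus_load and hvs_load values it sums.
	 */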
	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;
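
	/* async_modeset acts as a binary semaphore serializing commits:
	 * vc4_atomic_commit() takes it and vc4_atomic_complete_commit()
	 * releases it once the hardware has been updated.
	 */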
	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}