/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #define pr_fmt(fmt) "[drm:%s] " fmt, __func__
17 #include "dpu_hw_lm.h"
18 #include "dpu_hw_ctl.h"
19 #include "dpu_hw_pingpong.h"
20 #include "dpu_hw_intf.h"
21 #include "dpu_encoder.h"
22 #include "dpu_trace.h"
/*
 * RESERVED_BY_OTHER - true if hw block @h is already bound to an encoder
 * other than @r.  enc_id == 0 means "unreserved", so a zero id never counts
 * as a conflict.  Both macro arguments are fully parenthesized to avoid
 * operator-precedence surprises at expansion sites.
 */
#define RESERVED_BY_OTHER(h, r) \
	((h)->enc_id && ((h)->enc_id != (r)))
28 * struct dpu_rm_requirements - Reservation requirements parameter bundle
29 * @topology: selected topology for the display
30 * @hw_res: Hardware resources required as reported by the encoders
32 struct dpu_rm_requirements {
33 struct msm_display_topology topology;
34 struct dpu_encoder_hw_resources hw_res;
39 * struct dpu_rm_hw_blk - hardware block tracking list member
40 * @list: List head for list of all hardware blocks tracking items
41 * @id: Hardware ID number, within it's own space, ie. LM_X
42 * @enc_id: Encoder id to which this blk is binded
43 * @hw: Pointer to the hardware register access object for this block
45 struct dpu_rm_hw_blk {
46 struct list_head list;
49 struct dpu_hw_blk *hw;
52 void dpu_rm_init_hw_iter(
53 struct dpu_rm_hw_iter *iter,
55 enum dpu_hw_blk_type type)
57 memset(iter, 0, sizeof(*iter));
58 iter->enc_id = enc_id;
62 static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
64 struct list_head *blk_list;
66 if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
67 DPU_ERROR("invalid rm\n");
72 blk_list = &rm->hw_blks[i->type];
74 if (i->blk && (&i->blk->list == blk_list)) {
75 DPU_DEBUG("attempt resume iteration past last\n");
79 i->blk = list_prepare_entry(i->blk, blk_list, list);
81 list_for_each_entry_continue(i->blk, blk_list, list) {
82 if (i->enc_id == i->blk->enc_id) {
84 DPU_DEBUG("found type %d id %d for enc %d\n",
85 i->type, i->blk->id, i->enc_id);
90 DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
95 bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
99 mutex_lock(&rm->rm_lock);
100 ret = _dpu_rm_get_hw_locked(rm, i);
101 mutex_unlock(&rm->rm_lock);
106 static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
110 dpu_hw_lm_destroy(hw);
113 dpu_hw_ctl_destroy(hw);
115 case DPU_HW_BLK_PINGPONG:
116 dpu_hw_pingpong_destroy(hw);
118 case DPU_HW_BLK_INTF:
119 dpu_hw_intf_destroy(hw);
121 case DPU_HW_BLK_SSPP:
122 /* SSPPs are not managed by the resource manager */
124 /* Top is a singleton, not managed in hw_blks list */
127 DPU_ERROR("unsupported block type %d\n", type);
132 int dpu_rm_destroy(struct dpu_rm *rm)
134 struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
135 enum dpu_hw_blk_type type;
137 for (type = 0; type < DPU_HW_BLK_MAX; type++) {
138 list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
140 list_del(&hw_cur->list);
141 _dpu_rm_hw_destroy(type, hw_cur->hw);
146 mutex_destroy(&rm->rm_lock);
151 static int _dpu_rm_hw_blk_create(
153 struct dpu_mdss_cfg *cat,
155 enum dpu_hw_blk_type type,
157 void *hw_catalog_info)
159 struct dpu_rm_hw_blk *blk;
164 hw = dpu_hw_lm_init(id, mmio, cat);
167 hw = dpu_hw_ctl_init(id, mmio, cat);
169 case DPU_HW_BLK_PINGPONG:
170 hw = dpu_hw_pingpong_init(id, mmio, cat);
172 case DPU_HW_BLK_INTF:
173 hw = dpu_hw_intf_init(id, mmio, cat);
175 case DPU_HW_BLK_SSPP:
176 /* SSPPs are not managed by the resource manager */
178 /* Top is a singleton, not managed in hw_blks list */
181 DPU_ERROR("unsupported block type %d\n", type);
185 if (IS_ERR_OR_NULL(hw)) {
186 DPU_ERROR("failed hw object creation: type %d, err %ld\n",
191 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
193 _dpu_rm_hw_destroy(type, hw);
200 list_add_tail(&blk->list, &rm->hw_blks[type]);
205 int dpu_rm_init(struct dpu_rm *rm,
206 struct dpu_mdss_cfg *cat,
210 enum dpu_hw_blk_type type;
212 if (!rm || !cat || !mmio) {
213 DPU_ERROR("invalid kms\n");
217 /* Clear, setup lists */
218 memset(rm, 0, sizeof(*rm));
220 mutex_init(&rm->rm_lock);
222 for (type = 0; type < DPU_HW_BLK_MAX; type++)
223 INIT_LIST_HEAD(&rm->hw_blks[type]);
225 /* Interrogate HW catalog and create tracking items for hw blocks */
226 for (i = 0; i < cat->mixer_count; i++) {
227 struct dpu_lm_cfg *lm = &cat->mixer[i];
229 if (lm->pingpong == PINGPONG_MAX) {
230 DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
234 rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
235 cat->mixer[i].id, &cat->mixer[i]);
237 DPU_ERROR("failed: lm hw not available\n");
241 if (!rm->lm_max_width) {
242 rm->lm_max_width = lm->sblk->maxwidth;
243 } else if (rm->lm_max_width != lm->sblk->maxwidth) {
245 * Don't expect to have hw where lm max widths differ.
246 * If found, take the min.
248 DPU_ERROR("unsupported: lm maxwidth differs\n");
249 if (rm->lm_max_width > lm->sblk->maxwidth)
250 rm->lm_max_width = lm->sblk->maxwidth;
254 for (i = 0; i < cat->pingpong_count; i++) {
255 rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
256 cat->pingpong[i].id, &cat->pingpong[i]);
258 DPU_ERROR("failed: pp hw not available\n");
263 for (i = 0; i < cat->intf_count; i++) {
264 if (cat->intf[i].type == INTF_NONE) {
265 DPU_DEBUG("skip intf %d with type none\n", i);
269 rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
270 cat->intf[i].id, &cat->intf[i]);
272 DPU_ERROR("failed: intf hw not available\n");
277 for (i = 0; i < cat->ctl_count; i++) {
278 rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
279 cat->ctl[i].id, &cat->ctl[i]);
281 DPU_ERROR("failed: ctl hw not available\n");
294 static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
296 return top->num_intf > 1;
300 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
301 * proposed use case requirements, incl. hardwired dependent blocks like
303 * @rm: dpu resource manager handle
304 * @enc_id: encoder id requesting for allocation
305 * @reqs: proposed use case requirements
306 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
307 * blocks connected to the lm (pp) is available and appropriate
308 * @pp: output parameter, pingpong block attached to the layer mixer.
309 * NULL if pp was not available, or not matching requirements.
310 * @primary_lm: if non-null, this function check if lm is compatible primary_lm
311 * as well as satisfying all other requirements
312 * @Return: true if lm matches all requirements, false otherwise
314 static bool _dpu_rm_check_lm_and_get_connected_blks(
317 struct dpu_rm_requirements *reqs,
318 struct dpu_rm_hw_blk *lm,
319 struct dpu_rm_hw_blk **pp,
320 struct dpu_rm_hw_blk *primary_lm)
322 const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
323 struct dpu_rm_hw_iter iter;
327 DPU_DEBUG("check lm %d pp %d\n",
328 lm_cfg->id, lm_cfg->pingpong);
330 /* Check if this layer mixer is a peer of the proposed primary LM */
332 const struct dpu_lm_cfg *prim_lm_cfg =
333 to_dpu_hw_mixer(primary_lm->hw)->cap;
335 if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
336 DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
342 /* Already reserved? */
343 if (RESERVED_BY_OTHER(lm, enc_id)) {
344 DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
348 dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
349 while (_dpu_rm_get_hw_locked(rm, &iter)) {
350 if (iter.blk->id == lm_cfg->pingpong) {
357 DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
361 if (RESERVED_BY_OTHER(*pp, enc_id)) {
362 DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
370 static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
371 struct dpu_rm_requirements *reqs)
374 struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
375 struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
376 struct dpu_rm_hw_iter iter_i, iter_j;
380 if (!reqs->topology.num_lm) {
381 DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
385 /* Find a primary mixer */
386 dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
387 while (lm_count != reqs->topology.num_lm &&
388 _dpu_rm_get_hw_locked(rm, &iter_i)) {
389 memset(&lm, 0, sizeof(lm));
390 memset(&pp, 0, sizeof(pp));
393 lm[lm_count] = iter_i.blk;
395 if (!_dpu_rm_check_lm_and_get_connected_blks(
396 rm, enc_id, reqs, lm[lm_count],
397 &pp[lm_count], NULL))
402 /* Valid primary mixer found, find matching peers */
403 dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
405 while (lm_count != reqs->topology.num_lm &&
406 _dpu_rm_get_hw_locked(rm, &iter_j)) {
407 if (iter_i.blk == iter_j.blk)
410 if (!_dpu_rm_check_lm_and_get_connected_blks(
411 rm, enc_id, reqs, iter_j.blk,
412 &pp[lm_count], iter_i.blk))
415 lm[lm_count] = iter_j.blk;
420 if (lm_count != reqs->topology.num_lm) {
421 DPU_DEBUG("unable to find appropriate mixers\n");
425 for (i = 0; i < ARRAY_SIZE(lm); i++) {
429 lm[i]->enc_id = enc_id;
430 pp[i]->enc_id = enc_id;
432 trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
438 static int _dpu_rm_reserve_ctls(
441 const struct msm_display_topology *top)
443 struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
444 struct dpu_rm_hw_iter iter;
445 int i = 0, num_ctls = 0;
446 bool needs_split_display = false;
448 memset(&ctls, 0, sizeof(ctls));
450 /* each hw_intf needs its own hw_ctrl to program its control path */
451 num_ctls = top->num_intf;
453 needs_split_display = _dpu_rm_needs_split_display(top);
455 dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
456 while (_dpu_rm_get_hw_locked(rm, &iter)) {
457 const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
458 unsigned long features = ctl->caps->features;
459 bool has_split_display;
461 if (RESERVED_BY_OTHER(iter.blk, enc_id))
464 has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
466 DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
468 if (needs_split_display != has_split_display)
472 DPU_DEBUG("ctl %d match\n", iter.blk->id);
481 for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
482 ctls[i]->enc_id = enc_id;
483 trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
489 static int _dpu_rm_reserve_intf(
493 enum dpu_hw_blk_type type)
495 struct dpu_rm_hw_iter iter;
498 /* Find the block entry in the rm, and note the reservation */
499 dpu_rm_init_hw_iter(&iter, 0, type);
500 while (_dpu_rm_get_hw_locked(rm, &iter)) {
501 if (iter.blk->id != id)
504 if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
505 DPU_ERROR("type %d id %d already reserved\n", type, id);
509 iter.blk->enc_id = enc_id;
510 trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
514 /* Shouldn't happen since intfs are fixed at probe */
516 DPU_ERROR("couldn't find type %d id %d\n", type, id);
523 static int _dpu_rm_reserve_intf_related_hw(
526 struct dpu_encoder_hw_resources *hw_res)
531 for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
532 if (hw_res->intfs[i] == INTF_MODE_NONE)
535 ret = _dpu_rm_reserve_intf(rm, enc_id, id,
544 static int _dpu_rm_make_reservation(
546 struct drm_encoder *enc,
547 struct drm_crtc_state *crtc_state,
548 struct dpu_rm_requirements *reqs)
552 ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
554 DPU_ERROR("unable to find appropriate mixers\n");
558 ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
560 DPU_ERROR("unable to find appropriate CTL\n");
564 ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
571 static int _dpu_rm_populate_requirements(
573 struct drm_encoder *enc,
574 struct drm_crtc_state *crtc_state,
575 struct dpu_rm_requirements *reqs,
576 struct msm_display_topology req_topology)
578 dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
580 reqs->topology = req_topology;
582 DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
583 reqs->topology.num_lm, reqs->topology.num_enc,
584 reqs->topology.num_intf);
589 static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
591 struct dpu_rm_hw_blk *blk;
592 enum dpu_hw_blk_type type;
594 for (type = 0; type < DPU_HW_BLK_MAX; type++) {
595 list_for_each_entry(blk, &rm->hw_blks[type], list) {
596 if (blk->enc_id == enc_id) {
598 DPU_DEBUG("rel enc %d %d %d\n", enc_id,
605 void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
607 mutex_lock(&rm->rm_lock);
609 _dpu_rm_release_reservation(rm, enc->base.id);
611 mutex_unlock(&rm->rm_lock);
616 struct drm_encoder *enc,
617 struct drm_crtc_state *crtc_state,
618 struct msm_display_topology topology,
621 struct dpu_rm_requirements reqs;
624 /* Check if this is just a page-flip */
625 if (!drm_atomic_crtc_needs_modeset(crtc_state))
628 DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
629 enc->base.id, crtc_state->crtc->base.id, test_only);
631 mutex_lock(&rm->rm_lock);
633 ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
636 DPU_ERROR("failed to populate hw requirements\n");
640 ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
642 DPU_ERROR("failed to reserve hw resources: %d\n", ret);
643 _dpu_rm_release_reservation(rm, enc->base.id);
644 } else if (test_only) {
645 /* test_only: test the reservation and then undo */
646 DPU_DEBUG("test_only: discard test [enc: %d]\n",
648 _dpu_rm_release_reservation(rm, enc->base.id);
652 mutex_unlock(&rm->rm_lock);