Merge tag 'drm-msm-fixes-2022-06-20' of https://gitlab.freedesktop.org/drm/msm into...
author    Dave Airlie <airlied@redhat.com>
          Fri, 24 Jun 2022 00:11:26 +0000 (10:11 +1000)
committer Dave Airlie <airlied@redhat.com>
          Fri, 24 Jun 2022 00:11:27 +0000 (10:11 +1000)
Fixes for v5.19-rc4

- Workaround for Parade DSI bridge power sequencing
- Fix for multi-planar YUV format offsets
- Limiting WB modes to the max SSPP linewidth
- Fixing the supported rotations to add 180-degree rotation back for IGT
- Fix to handle pm_runtime_get_sync() errors to avoid unclocked access
  in the bind() path of the dpu driver
- Fix the irq_free()-without-request issue which was being hit frequently
  in CI
- Fix to add a minimum ICC vote in the msm_mdss pm_resume path to address
  bootup splats
- Fix to avoid dereferencing a pointer before checking it in the WB encoder
- Fix to avoid crash during suspend in DP driver by ensuring interrupt
  mask bits are updated
- Remove unused code from dpu_encoder_virt_atomic_check()
- Fix to remove redundant init of dsc variable
- Fix to ensure mmap offset is initialized to avoid memory corruption
  from unpin/evict
- Fix double runpm disable in probe-defer path
- VMA fenced-unpin fixes
- Fix for WB max-width
- Fix for rare dp resolution change issue

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvdsOF1-+WfTWyEyu33XPcvxOCU00G-dz7EF2J+fdyUHg@mail.gmail.com
17 files changed:
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_ctrl.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_ringbuffer.c

index 4e665c806a14ea33e7f139bb4e138796835f680c..efe9840e28fadd950e624c92df3e542aeec42dab 100644 (file)
@@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
                ring->cur = ring->start;
                ring->next = ring->start;
-
-               /* reset completed fence seqno: */
-               ring->memptrs->fence = ring->fctx->completed_fence;
                ring->memptrs->rptr = 0;
+
+               /* Detect and clean up an impossible fence, i.e. if the GPU
+                * managed to scribble something invalid, we don't want that to
+                * confuse us into mistakenly believing that submits have completed.
+                */
+               if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+                       ring->memptrs->fence = ring->fctx->last_fence;
+               }
        }
 
        return 0;
@@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
        for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
                release_firmware(adreno_gpu->fw[i]);
 
-       pm_runtime_disable(&priv->gpu_pdev->dev);
+       if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+               pm_runtime_disable(&priv->gpu_pdev->dev);
 
        msm_gpu_cleanup(&adreno_gpu->base);
 }
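
The clamp above relies on wraparound-safe sequence-number comparison. A
standalone sketch of the idiom (illustrative reconstruction, using the same
signed-difference trick as the kernel's time_after(); not the exact
msm_drv.h definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* 'a' is after 'b' if the signed 32-bit difference is positive;
     * this stays correct across uint32_t wraparound */
    static int fence_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    static int fence_before(uint32_t a, uint32_t b)
    {
            return fence_after(b, a);
    }

    int main(void)
    {
            /* near the wrap point a plain '<' would give the wrong answer */
            uint32_t last_fence = 0xfffffffeu, scribbled = 5;

            /* prints 1: 5 is logically "after" 0xfffffffe */
            printf("%d\n", fence_before(last_fence, scribbled));
            return 0;
    }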
index 399115e4e217f7b279b35f8bc0ba0aba8fee72fa..2fd787079f9b024875e502e5b2c4fcaa2a11221a 100644 (file)
@@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
        struct msm_drm_private *priv = dev->dev_private;
        struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
 
-       return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+       /*
+        * Ideally we would limit the modes only to the max_linewidth, but on
+        * some chipsets this would allow even 4K modes to be added, and those
+        * then fail the per-SSPP bandwidth checks. So, until we have dual-SSPP
+        * and source-split support (at which point 4K modes can be supported),
+        * let's limit the modes based on max_mixer_width.
+        */
+       return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
                        dev->mode_config.max_height);
 }
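
For context: drm_add_modes_noedid() adds the standard mode list clamped to
the given maximum width and height, which is why the width argument chosen
above directly controls whether 4K modes become visible to userspace. A
minimal sketch of such a .get_modes callback (names and limits here are
illustrative, not from the driver):

    static int example_wb_get_modes(struct drm_connector *connector)
    {
            /* hypothetical per-SoC limits */
            const int max_width = 2560, max_height = 2160;

            /* adds only modes with hdisplay <= max_width and
             * vdisplay <= max_height */
            return drm_add_modes_noedid(connector, max_width, max_height);
    }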
 
index fb48c8c19ec3aa05b465ff2004555dedc759955f..17cb1fc783795248bd7dfd33237c20e19a5bd363 100644 (file)
@@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
                        DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+                       of_node_put(panel_node);
                        return PTR_ERR(encoder);
                }
 
@@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
                if (IS_ERR(connector)) {
                        DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+                       of_node_put(panel_node);
                        return PTR_ERR(connector);
                }
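
Both hunks above plug the same leak. For reference, a minimal sketch of the
refcount pattern being restored (function names are illustrative):

    static int example_modeset_init(struct drm_device *dev,
                                    struct device_node *panel_node)
    {
            struct drm_encoder *encoder;

            /* the caller obtained panel_node with an of_*_get() helper,
             * so every exit path must drop that reference */
            encoder = example_encoder_init(dev, panel_node); /* hypothetical */
            if (IS_ERR(encoder)) {
                    of_node_put(panel_node); /* was missing on this path */
                    return PTR_ERR(encoder);
            }

            /* the success path keeps the reference for the encoder's
             * lifetime and drops it on teardown */
            return 0;
    }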
 
index b7f5b8d3bbd6138bed6903bc3d3ae9974ee0911f..703249384e7c7d0b000b2b51f89c45791ab6684e 100644 (file)
@@ -1534,6 +1534,8 @@ end:
        return ret;
 }
 
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
+
 static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 {
        int ret = 0;
@@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 
        ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
        if (!ret)
-               ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+               ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
        else
                DRM_ERROR("failed to enable DP link controller\n");
 
@@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
        return dp_ctrl_setup_main_link(ctrl, &training_step);
 }
 
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
+{
+       int ret;
+       struct dp_ctrl_private *ctrl;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       ret = dp_ctrl_enable_stream_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+               return ret;
+       }
+
+       dp_ctrl_send_phy_test_pattern(ctrl);
+
+       return 0;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
 {
        int ret = 0;
        bool mainlink_ready = false;
@@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
                goto end;
        }
 
-       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-               dp_ctrl_send_phy_test_pattern(ctrl);
-               return 0;
-       }
-
-       if (!dp_ctrl_channel_eq_ok(ctrl))
+       if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
                dp_ctrl_link_retrain(ctrl);
 
        /* stop txing train pattern to end link training */
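
Net effect of the refactor above, as a reconstructed call flow (sketch):

    /*
     * dp_ctrl_process_phy_test_request()
     *   -> dp_ctrl_on_link()                    // retrain the main link
     *   -> dp_ctrl_on_stream_phy_test_report()  // pixel clocks + PHY test
     *                                           // pattern only, no stream
     *
     * dp_bridge_enable()
     *   -> dp_display_enable(dp, force_link_train)
     *     -> dp_ctrl_on_stream(ctrl, force_link_train)
     *          // full stream path; retrains when forced or when channel
     *          // EQ is lost, and no longer special-cases the test pattern
     */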
index 0745fde01b45d9963642c7fb24a495c6121f19ea..b563e2e3bfe5d225ef23c392365359e05f50888f 100644 (file)
@@ -21,7 +21,7 @@ struct dp_ctrl {
 };
 
 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
 int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
index bce77935394f07c96d162c6e27a1ff7062693e8f..ec26855079c181a76cfb0d405f70d3c5a6ec6575 100644 (file)
@@ -309,7 +309,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
        struct msm_drm_private *priv = dev_get_drvdata(master);
 
        /* disable all HPD interrupts */
-       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+       if (dp->core_initialized)
+               dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
 
        kthread_stop(dp->ev_tsk);
 
@@ -872,7 +873,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
                return 0;
        }
 
-       rc = dp_ctrl_on_stream(dp->ctrl);
+       rc = dp_ctrl_on_stream(dp->ctrl, data);
        if (!rc)
                dp_display->power_on = true;
 
@@ -1659,6 +1660,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
        int rc = 0;
        struct dp_display_private *dp_display;
        u32 state;
+       bool force_link_train = false;
 
        dp_display = container_of(dp, struct dp_display_private, dp_display);
        if (!dp_display->dp_mode.drm_mode.clock) {
@@ -1693,10 +1695,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
 
        state =  dp_display->hpd_state;
 
-       if (state == ST_DISPLAY_OFF)
+       if (state == ST_DISPLAY_OFF) {
                dp_display_host_phy_init(dp_display);
+               force_link_train = true;
+       }
 
-       dp_display_enable(dp_display, 0);
+       dp_display_enable(dp_display, force_link_train);
 
        rc = dp_display_post_enable(dp);
        if (rc) {
@@ -1705,10 +1709,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
                dp_display_unprepare(dp);
        }
 
-       /* manual kick off plug event to train link */
-       if (state == ST_DISPLAY_OFF)
-               dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
-
        /* completed connection */
        dp_display->hpd_state = ST_CONNECTED;
 
index 44485363f37a08d5e5c9b9170a8e9d30206bb3ef..14ab9a627d8b036b72d6ca6880298b1b2148793b 100644 (file)
@@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-       .gem_prime_mmap     = drm_gem_prime_mmap,
+       .gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
 #endif
index 08388d742d6538234544a63b302bbaaa6c9ab4a6..099a67d10c3a76ae3e0d13adb1fd6fd93f942d71 100644 (file)
@@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
index 3df255402a33c1d9de601768cfc8c60631242a34..38e3323bc2324ee2bae81d9a359405610cdc59f1 100644 (file)
@@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
                (int32_t)(*fctx->fenceptr - fence) >= 0;
 }
 
-/* called from workqueue */
+/* called from irq handler and workqueue (in recover path) */
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-       spin_lock(&fctx->spinlock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fctx->spinlock, flags);
        fctx->completed_fence = max(fence, fctx->completed_fence);
-       spin_unlock(&fctx->spinlock);
+       spin_unlock_irqrestore(&fctx->spinlock, flags);
 }
 
 struct msm_fence {
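
A note on the locking change above: fctx->spinlock is now taken from both
process context and the IRQ handler, so the plain spin_lock() variant is no
longer safe. A minimal sketch of the hazard the irqsave variant prevents
(illustrative, not from the patch):

    /*
     *   process context                 hard irq (same CPU)
     *   ---------------                 -------------------
     *   spin_lock(&fctx->spinlock);
     *        ... irq fires ...
     *                                   spin_lock(&fctx->spinlock);
     *                                   -> spins forever: self-deadlock
     */
    unsigned long flags;

    spin_lock_irqsave(&fctx->spinlock, flags);  /* also masks local irqs */
    fctx->completed_fence = max(fence, fctx->completed_fence);
    spin_unlock_irqrestore(&fctx->spinlock, flags);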
index 97d5b4d8b9b05c645a8b6acd663e9d8749560d25..7f92231785a0e49c25181bd875da60fa7d2b0fb2 100644 (file)
@@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
        return ret;
 }
 
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
        GEM_WARN_ON(!msm_gem_is_locked(obj));
 
-       msm_gem_unpin_vma(vma);
-
        msm_obj->pin_count--;
        GEM_WARN_ON(msm_obj->pin_count < 0);
 
@@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
        msm_gem_lock(obj);
        vma = lookup_vma(obj, aspace);
        if (!GEM_WARN_ON(!vma)) {
-               msm_gem_unpin_vma_locked(obj, vma);
+               msm_gem_unpin_vma(vma);
+               msm_gem_unpin_locked(obj);
        }
        msm_gem_unlock(obj);
 }
index c75d3b879a53bfbd3f3141db26ccfeb05d9be22c..6b7d5bb3b575fa3e77a6509328f47ed2fc2d5c24 100644 (file)
@@ -145,7 +145,7 @@ struct msm_gem_object {
 
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
                                           struct msm_gem_address_space *aspace);
 int msm_gem_get_iova(struct drm_gem_object *obj,
@@ -377,10 +377,11 @@ struct msm_gem_submit {
        } *cmd;  /* array of size nr_cmds */
        struct {
 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED   0x4000   /* obj lock is held */
-#define BO_ACTIVE   0x2000   /* active refcnt is held */
-#define BO_PINNED   0x1000   /* obj is pinned and on active list */
+#define BO_VALID       0x8000  /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED      0x4000  /* obj lock is held */
+#define BO_ACTIVE      0x2000  /* active refcnt is held */
+#define BO_OBJ_PINNED  0x1000  /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED  0x0800  /* vma (virtual address) is pinned */
                uint32_t flags;
                union {
                        struct msm_gem_object *obj;
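
The comment above the defines requires the kernel-internal BO_* bits to stay
disjoint from the userspace MSM_SUBMIT_BO_* flags kept in the same word. A
hypothetical compile-time guard (not in the patch; assumes
MSM_SUBMIT_BO_FLAGS is the UAPI mask from include/uapi/drm/msm_drm.h):

    #define BO_KERNEL_FLAGS (BO_VALID | BO_LOCKED | BO_ACTIVE | \
                             BO_OBJ_PINNED | BO_VMA_PINNED)

    /* fails the build if an internal bit ever collides with UAPI bits */
    static_assert((BO_KERNEL_FLAGS & MSM_SUBMIT_BO_FLAGS) == 0,
                  "internal BO_* bits collide with UAPI submit flags");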
index 94ab705e9b8a430c551ca9a9a08686c5b61dfaf2..dcc8a573bc7625d1d7ef7611372e096fb7f2e118 100644 (file)
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+       int ret;
+
+       /* Ensure the mmap offset is initialized.  We lazily initialize it,
+        * so if the object has not first been mmap'd directly as a GEM
+        * object, the mmap offset will not yet be initialized.
+        */
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               return ret;
+
+       return drm_gem_prime_mmap(obj, vma);
+}
+
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
index 80975229b4dea3764be045556cf2b62fe7e55ad5..3c3a0cfade369b96e39ce408990079eaa8e0c6cf 100644 (file)
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
         */
        submit->bos[i].flags &= ~cleanup_flags;
 
-       if (flags & BO_PINNED)
-               msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
+       if (flags & BO_VMA_PINNED)
+               msm_gem_unpin_vma(submit->bos[i].vma);
+
+       if (flags & BO_OBJ_PINNED)
+               msm_gem_unpin_locked(obj);
 
        if (flags & BO_ACTIVE)
                msm_gem_active_put(obj);
@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
 
 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 {
-       submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+       unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
+                                BO_ACTIVE | BO_LOCKED;
+       submit_cleanup_bo(submit, i, cleanup_flags);
 
        if (!(submit->bos[i].flags & BO_VALID))
                submit->bos[i].iova = 0;
@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
                if (ret)
                        break;
 
-               submit->bos[i].flags |= BO_PINNED;
+               submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
                submit->bos[i].vma = vma;
 
                if (vma->iova == submit->bos[i].iova) {
@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
        unsigned i;
 
        if (error)
-               cleanup_flags |= BO_PINNED | BO_ACTIVE;
+               cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
                struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
                msm_gem_lock(obj);
-               submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+               /* Note, VMA already fence-unpinned before submit: */
+               submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
                msm_gem_unlock(obj);
                drm_gem_object_put(obj);
        }
index 3c1dc9241831b62d7c49e60af4c1eea0b5e7537d..c471aebcdbab2b5b0760c23350a183b16c091c93 100644 (file)
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
        unsigned size = vma->node.size;
 
        /* Print a message if we try to purge a vma in use */
-       if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
-               return;
+       GEM_WARN_ON(msm_gem_vma_inuse(vma));
 
        /* Don't do anything if the memory isn't mapped */
        if (!vma->mapped)
@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
 {
-       if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
-               return;
+       GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
 
        spin_lock(&aspace->lock);
        if (vma->iova)
index eb8a6663f309f49326b3eeb317b41de316e3e2ad..c8cd9bfa3eebda524ce8af1437e8bbaf037bf159 100644 (file)
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
        return ret;
 }
 
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
-               uint32_t fence)
-{
-       struct msm_gem_submit *submit;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ring->submit_lock, flags);
-       list_for_each_entry(submit, &ring->submits, node) {
-               if (fence_after(submit->seqno, fence))
-                       break;
-
-               msm_update_fence(submit->ring->fctx,
-                       submit->hw_fence->seqno);
-               dma_fence_signal(submit->hw_fence);
-       }
-       spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
 #ifdef CONFIG_DEV_COREDUMP
 static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
                size_t count, void *data, size_t datalen)
@@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
                 * one more to clear the faulting submit
                 */
                if (ring == cur_ring)
-                       fence++;
+                       ring->memptrs->fence = ++fence;
 
-               update_fences(gpu, ring, fence);
+               msm_update_fence(ring->fctx, fence);
        }
 
        if (msm_gpu_active(gpu)) {
@@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
        msm_submit_retire(submit);
 
        pm_runtime_mark_last_busy(&gpu->pdev->dev);
-       pm_runtime_put_autosuspend(&gpu->pdev->dev);
 
        spin_lock_irqsave(&ring->submit_lock, flags);
        list_del(&submit->node);
@@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                msm_devfreq_idle(gpu);
        mutex_unlock(&gpu->active_lock);
 
+       pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
        msm_gem_submit_put(submit);
 }
 
@@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
        int i;
 
        for (i = 0; i < gpu->nr_rings; i++)
-               update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+               msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 
        kthread_queue_work(gpu->worker, &gpu->retire_work);
        update_sw_cntrs(gpu);
index bcaddbba564df0b189aa4ece9c64de3935aff8ba..a54ed354578b53c1ce53dd35831fa4260d434372 100644 (file)
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
        u64 addr = iova;
        unsigned int i;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+       for_each_sgtable_sg(sgt, sg, i) {
                size_t size = sg->length;
                phys_addr_t phys = sg_phys(sg);
 
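
The distinction the fix relies on (sketch): after DMA mapping, an sg_table
carries two entry counts, and they can differ when the DMA layer coalesces
entries:

    /*
     *   sgt->orig_nents -- CPU-side entries (what a sg_phys() walk needs)
     *   sgt->nents      -- DMA-mapped entries (for sg_dma_address() walks)
     *
     * for_each_sg(sgt->sgl, sg, sgt->nents, i) -- wrong count for this
     *                                             CPU-side (sg_phys) walk
     * for_each_sgtable_sg(sgt, sg, i)          -- iterates orig_nents,
     *                                             matching the phys walk
     */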
index 43066320ff8c9feeec5b092e31cb151f99f6ac13..56eecb4a72dc6d3e270316e88b20fcd37932117e 100644 (file)
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
                msm_gem_lock(obj);
                msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
-               submit->bos[i].flags &= ~BO_PINNED;
+               submit->bos[i].flags &= ~BO_VMA_PINNED;
                msm_gem_unlock(obj);
        }