Merge tag 'drm-intel-next-2023-08-03' of git://anongit.freedesktop.org/drm/drm-intel...
author Dave Airlie <airlied@redhat.com>
Mon, 7 Aug 2023 03:18:12 +0000 (13:18 +1000)
committer Dave Airlie <airlied@redhat.com>
Mon, 7 Aug 2023 03:47:30 +0000 (13:47 +1000)
- Removing unused declarations (Arnd, Gustavo)
- ICL+ DSI modeset sequence fixes (Ville)
- Improvements on HDCP (Suraj)
- Fixes and clean up on MTL Display (Mika Kahola, Lee, RK, Nirmoy, Chaitanya)
- Restore HSW/BDW PSR1 (Ville)
- Other PSR Fixes (Jouni)
- Fixes around DC states and other Display Power (Imre)
- Init DDI ports in VBT order (Ville)
- General documentation fixes (Jani)
- General refactor for better organization (Jani)
- Bigjoiner fix (Stanislav)
- VDSC Fixes and improvements (Stanislav, Suraj)
- Hotplug fixes and improvements (Simon, Suraj)
- Start using plane scale factor for relative data rate (Stanislav)
- Use shmem for dpt objects (RK)
- Simplify expression &to_i915(dev)->drm (Uwe)
- Do not access i915_gem_object members from frontbuffer tracking (Jouni)
- Fix uncore race around i915->params.mmio_debug (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZMv4RCzGyCmG/BDe@intel.com
1  2 
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/gt/uc/intel_huc.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_gpu_error.c

index 9f40da20e88d28e6dca7fa3286f3b4d970a34002,def5d8a98b4011d017c49c956a1bfd097d920ef4..03675620e3eadc6331320dcbc618c3e99612857c
@@@ -713,9 -713,18 +713,18 @@@ u32 intel_dp_dsc_nearest_valid_bpp(stru
  
                /*
                 * According to BSpec, 27 is the max DSC output bpp,
-                * 8 is the min DSC output bpp
+                * 8 is the min DSC output bpp.
+                * While we can still clamp higher bpp values to 27, saving bandwidth,
+                * if it is required to compress up to bpp < 8, means we can't do
+                * that and probably means we can't fit the required mode, even with
+                * DSC enabled.
                 */
-               bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
+               if (bits_per_pixel < 8) {
+                       drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+                                   bits_per_pixel);
+                       return 0;
+               }
+               bits_per_pixel = min_t(u32, bits_per_pixel, 27);
        } else {
                /* Find the nearest match in the array of known BPPs from VESA */
                for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
@@@ -4069,7 -4078,9 +4078,7 @@@ intel_dp_mst_hpd_irq(struct intel_dp *i
  {
        bool handled = false;
  
 -      drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
 -      if (handled)
 -              ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
 +      drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
  
        if (esi[1] & DP_CP_IRQ) {
                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@@ -4144,9 -4155,6 +4153,9 @@@ intel_dp_check_mst_status(struct intel_
  
                if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
                        drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
 +
 +              if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
 +                      drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
        }
  
        return link_ok;
index f1439827bc80e591d5a5c61b82966448c00a9bbc,e75852f13930fd5fc61b27e97996c141dfa03073..31d0d695d5671b0c6fc05d35c0d5443257158a75
@@@ -85,9 -85,9 +85,9 @@@ static void intel_fbdev_invalidate(stru
        intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
  }
  
 -FB_GEN_DEFAULT_DEFERRED_IO_OPS(intel_fbdev,
 -                             drm_fb_helper_damage_range,
 -                             drm_fb_helper_damage_area)
 +FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(intel_fbdev,
 +                                drm_fb_helper_damage_range,
 +                                drm_fb_helper_damage_area)
  
  static int intel_fbdev_set_par(struct fb_info *info)
  {
@@@ -135,9 -135,6 +135,6 @@@ static int intel_fbdev_mmap(struct fb_i
        return i915_gem_fb_mmap(obj, vma);
  }
  
- __diag_push();
- __diag_ignore_all("-Woverride-init", "Allow overriding the default ops");
  static const struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
        .fb_mmap = intel_fbdev_mmap,
  };
  
- __diag_pop();
  static int intelfb_alloc(struct drm_fb_helper *helper,
                         struct drm_fb_helper_surface_size *sizes)
  {
                 * If the FB is too big, just don't use it since fbdev is not very
                 * important and we should probably use that space with FBC or other
                 * features.
+                *
+                * Also skip stolen on MTL as Wa_22018444074 mitigation.
                 */
-               if (size * 2 < dev_priv->dsm.usable_size)
+               if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
                        obj = i915_gem_object_create_stolen(dev_priv, size);
                if (IS_ERR(obj))
                        obj = i915_gem_object_create_shmem(dev_priv, size);
index ddd146265beb42b399b2c9b49f459fbebddc9ee8,40525c74321a48f0e681b668456c7767f3d0c41e..ba2ce607a6133a4557a87e028ee4695ffae05b45
@@@ -26,6 -26,7 +26,7 @@@
   * The kernel driver is only responsible for loading the HuC firmware and
   * triggering its security authentication. This is done differently depending
   * on the platform:
+  *
   * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
   *   and the authentication via GuC
   * - DG2: load and authentication are both performed via GSC.
@@@ -33,6 -34,7 +34,7 @@@
   *   not-DG2 older platforms), while the authentication is done in 2-steps,
   *   a first auth for clear-media workloads via GuC and a second one for all
   *   workloads via GSC.
+  *
   * On platforms where the GuC does the authentication, to correctly do so the
   * HuC binary must be loaded before the GuC one.
   * Loading the HuC is optional; however, not using the HuC might negatively
@@@ -265,7 -267,7 +267,7 @@@ static bool vcs_supported(struct intel_
        GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
  
        if (gt_is_root(gt))
-               mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+               mask = INTEL_INFO(gt->i915)->platform_engine_mask;
        else
                mask = gt->info.engine_mask;
  
@@@ -384,7 -386,6 +386,7 @@@ int intel_huc_init(struct intel_huc *hu
  
                vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
                if (IS_ERR(vma)) {
 +                      err = PTR_ERR(vma);
                        huc_info(huc, "Failed to allocate heci pkt\n");
                        goto out;
                }
index 222d0a1f3b5588f848b0358f3ff0cb2e3566fc26,6664f9416ec2d8fd1b6286aa407e9da35ff4d728..294b022de22bb08acb06441fda2a4c486cfd875e
@@@ -711,6 -711,8 +711,8 @@@ static void i915_welcome_messages(struc
  
                intel_device_info_print(INTEL_INFO(dev_priv),
                                        RUNTIME_INFO(dev_priv), &p);
+               intel_display_device_info_print(DISPLAY_INFO(dev_priv),
+                                               DISPLAY_RUNTIME_INFO(dev_priv), &p);
                i915_print_iommu_status(dev_priv, &p);
                for_each_gt(gt, dev_priv, i)
                        intel_gt_info_print(&gt->info, &p);
@@@ -1816,8 -1818,10 +1818,8 @@@ static const struct drm_driver i915_drm
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .postclose = i915_driver_postclose,
 -      .show_fdinfo = i915_drm_client_fdinfo,
 +      .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
  
 -      .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 -      .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = i915_gem_prime_import,
  
        .dumb_create = i915_gem_dumb_create,
index 0c38bfb60c9a86e275f4c7eeccfe5ad6b1a18328,f109656cb1cc74b516a370264b8c46d6f64d2bfd..93d42f43caa530a1c72d8584240134f4b3183016
@@@ -187,64 -187,64 +187,64 @@@ i915_error_printer(struct drm_i915_erro
  }
  
  /* single threaded page allocator with a reserved stash for emergencies */
 -static void pool_fini(struct pagevec *pv)
 +static void pool_fini(struct folio_batch *fbatch)
  {
 -      pagevec_release(pv);
 +      folio_batch_release(fbatch);
  }
  
 -static int pool_refill(struct pagevec *pv, gfp_t gfp)
 +static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
  {
 -      while (pagevec_space(pv)) {
 -              struct page *p;
 +      while (folio_batch_space(fbatch)) {
 +              struct folio *folio;
  
 -              p = alloc_page(gfp);
 -              if (!p)
 +              folio = folio_alloc(gfp, 0);
 +              if (!folio)
                        return -ENOMEM;
  
 -              pagevec_add(pv, p);
 +              folio_batch_add(fbatch, folio);
        }
  
        return 0;
  }
  
 -static int pool_init(struct pagevec *pv, gfp_t gfp)
 +static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
  {
        int err;
  
 -      pagevec_init(pv);
 +      folio_batch_init(fbatch);
  
 -      err = pool_refill(pv, gfp);
 +      err = pool_refill(fbatch, gfp);
        if (err)
 -              pool_fini(pv);
 +              pool_fini(fbatch);
  
        return err;
  }
  
 -static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
 +static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
  {
 -      struct page *p;
 +      struct folio *folio;
  
 -      p = alloc_page(gfp);
 -      if (!p && pagevec_count(pv))
 -              p = pv->pages[--pv->nr];
 +      folio = folio_alloc(gfp, 0);
 +      if (!folio && folio_batch_count(fbatch))
 +              folio = fbatch->folios[--fbatch->nr];
  
 -      return p ? page_address(p) : NULL;
 +      return folio ? folio_address(folio) : NULL;
  }
  
 -static void pool_free(struct pagevec *pv, void *addr)
 +static void pool_free(struct folio_batch *fbatch, void *addr)
  {
 -      struct page *p = virt_to_page(addr);
 +      struct folio *folio = virt_to_folio(addr);
  
 -      if (pagevec_space(pv))
 -              pagevec_add(pv, p);
 +      if (folio_batch_space(fbatch))
 +              folio_batch_add(fbatch, folio);
        else
 -              __free_page(p);
 +              folio_put(folio);
  }
  
  #ifdef CONFIG_DRM_I915_COMPRESS_ERROR
  
  struct i915_vma_compress {
 -      struct pagevec pool;
 +      struct folio_batch pool;
        struct z_stream_s zstream;
        void *tmp;
  };
@@@ -381,7 -381,7 +381,7 @@@ static void err_compression_marker(stru
  #else
  
  struct i915_vma_compress {
 -      struct pagevec pool;
 +      struct folio_batch pool;
  };
  
  static bool compress_init(struct i915_vma_compress *c)
@@@ -649,6 -649,8 +649,8 @@@ static void err_print_capabilities(stru
        struct drm_printer p = i915_error_printer(m);
  
        intel_device_info_print(&error->device_info, &error->runtime_info, &p);
+       intel_display_device_info_print(&error->display_device_info,
+                                       &error->display_runtime_info, &p);
        intel_driver_caps_print(&error->driver_caps, &p);
  }
  
@@@ -1983,6 -1985,10 +1985,10 @@@ static void capture_gen(struct i915_gpu
        memcpy(&error->runtime_info,
               RUNTIME_INFO(i915),
               sizeof(error->runtime_info));
+       memcpy(&error->display_device_info, DISPLAY_INFO(i915),
+              sizeof(error->display_device_info));
+       memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
+              sizeof(error->display_runtime_info));
        error->driver_caps = i915->caps;
  }