Merge tag 'drm-misc-next-2019-01-23' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <airlied@redhat.com>
Thu, 24 Jan 2019 09:52:46 +0000 (19:52 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 24 Jan 2019 10:02:12 +0000 (20:02 +1000)
drm-misc-next for 5.1:

UAPI Changes:
 - Addition of the Allwinner tiled format modifier

Cross-subsystem Changes:

Core Changes:
 - dma-buf documentation improvements
 - Removal of now unused fbdev helpers
 - Addition of new drm fbdev helpers
 - Improvements to tinydrm
 - Addition of new drm_fourcc helpers
 - Improvements to i2c-over-aux to handle I2C_M_STOP

Driver Changes:
 - Add support for the TI DS90C185 LVDS bridge
 - Improvements to the thc63lvdm83d bridge
 - Improvements to sun4i YUV and scaler support
 - Fix to the powerdown sequence of panel-innolux

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190123110317.h4tovujaydo2bfz2@flea
159 files changed:
Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
Documentation/devicetree/bindings/display/renesas,du.txt
drivers/acpi/pmic/intel_pmic.c
drivers/acpi/pmic/intel_pmic.h
drivers/acpi/pmic/intel_pmic_chtwc.c
drivers/acpi/pmic/intel_pmic_xpower.c
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/adv7511/adv7533.c
drivers/gpu/drm/bridge/analogix-anx78xx.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_connector.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_guc_fw.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_hdcp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_huc_fw.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pipe_crc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/intel_uc_fw.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_vdsc.c
drivers/gpu/drm/i915/intel_wopcm.c
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/igt_spinner.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_workarounds.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/edp/edp_bridge.c
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.h
drivers/gpu/drm/rcar-du/rcar_du_group.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_plane.h
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.h
drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
include/drm/bridge/dw_mipi_dsi.h
include/drm/drm_bridge.h
include/drm/drm_dp_helper.h
include/linux/mfd/intel_soc_pmic.h

index ba5469dd09f35393e339af8000d6ca776d1c2f31..27a054e1bb5fb82ccd4bbc2d3120ac56a9b98e49 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
 
 - compatible : Shall contain one of
   - "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
+  - "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
   - "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
   - "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
   - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
@@ -25,7 +26,7 @@ Required properties:
 - clock-names: Name of the clocks. This property is model-dependent.
   - The functional clock, which mandatory for all models, shall be listed
     first, and shall be named "fck".
-  - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or
+  - On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or
     DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be
     named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN
     numerical index.
index 3c855d9f27193bcb03f365cc8c4a3a78a5d478a0..aedb22b4d1613d1ff69b833a5babd227892538e0 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
     - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
     - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
+    - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -57,6 +58,7 @@ corresponding to each DU output.
  R8A7744 (RZ/G1N)       DPAD 0         LVDS 0         -              -
  R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
  R8A77470 (RZ/G1C)      DPAD 0         DPAD 1         LVDS 0         -
+ R8A774C0 (RZ/G2E)      DPAD 0         LVDS 0         LVDS 1         -
  R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
  R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
  R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
index ca18e0d23df9775e0a9c342d5a22ac9257d6ee3a..c14cfaea92e25683e26970c24576a47ad3f8b927 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/export.h>
 #include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/regmap.h>
 #include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
        struct intel_pmic_regs_handler_ctx ctx;
 };
 
+static struct intel_pmic_opregion *intel_pmic_opregion;
+
 static int pmic_get_reg_bit(int address, struct pmic_table *table,
                            int count, int *reg, int *bit)
 {
@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
        }
 
        opregion->data = d;
+       intel_pmic_opregion = opregion;
        return 0;
 
 out_remove_thermal_handler:
@@ -319,3 +323,60 @@ out_error:
        return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
+
+/**
+ * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
+ * @i2c_address:  I2C client address for the PMIC
+ * @reg_address:  PMIC register address
+ * @value:        New value for the register bits to change
+ * @mask:         Mask indicating which register bits to change
+ *
+ * DSI LCD panels describe an initialization sequence in the i915 VBT (Video
+ * BIOS Tables) using so called MIPI sequences. One possible element in these
+ * sequences is a PMIC specific element of 15 bytes.
+ *
+ * This function executes these PMIC specific elements sending the embedded
+ * commands to the PMIC.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+                                             u32 value, u32 mask)
+{
+       struct intel_pmic_opregion_data *d;
+       int ret;
+
+       if (!intel_pmic_opregion) {
+               pr_warn("%s: No PMIC registered\n", __func__);
+               return -ENXIO;
+       }
+
+       d = intel_pmic_opregion->data;
+
+       mutex_lock(&intel_pmic_opregion->lock);
+
+       if (d->exec_mipi_pmic_seq_element) {
+               ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
+                                                   i2c_address, reg_address,
+                                                   value, mask);
+       } else if (d->pmic_i2c_address) {
+               if (i2c_address == d->pmic_i2c_address) {
+                       ret = regmap_update_bits(intel_pmic_opregion->regmap,
+                                                reg_address, mask, value);
+               } else {
+                       pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+                              __func__, i2c_address, reg_address, value, mask);
+                       ret = -ENXIO;
+               }
+       } else {
+               pr_warn("%s: Not implemented\n", __func__);
+               pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
+                       __func__, i2c_address, reg_address, value, mask);
+               ret = -EOPNOTSUPP;
+       }
+
+       mutex_unlock(&intel_pmic_opregion->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
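For reference, a minimal usage sketch of the helper exported above. The caller, struct and field names below are hypothetical; only intel_soc_pmic_exec_mipi_pmic_seq_element() and its signature come from this patch, and per the kernel-doc the intended caller is the i915 VBT MIPI-sequence code touched elsewhere in this series.

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/mfd/intel_soc_pmic.h>

/* Hypothetical decoded form of the 15-byte PMIC sequence element. */
struct example_pmic_elem {
	u16 i2c_address;	/* PMIC I2C client address */
	u32 reg_address;	/* PMIC register to update */
	u32 value;		/* new value for the masked bits */
	u32 mask;		/* which bits to change */
};

static int example_exec_pmic_elem(const struct example_pmic_elem *e)
{
	int ret;

	ret = intel_soc_pmic_exec_mipi_pmic_seq_element(e->i2c_address,
							e->reg_address,
							e->value, e->mask);
	if (ret < 0)
		pr_warn("PMIC MIPI sequence element failed: %d\n", ret);

	return ret;
}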
index 095afc96952ee5acf03b19df763343f121e91ff7..89379476a1df61f255a03195f5663f3d07df5241 100644 (file)
@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
        int (*update_aux)(struct regmap *r, int reg, int raw_temp);
        int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
        int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
+       int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
+                                         u32 reg_address, u32 value, u32 mask);
        struct pmic_table *power_table;
        int power_table_count;
        struct pmic_table *thermal_table;
        int thermal_table_count;
+       /* For generic exec_mipi_pmic_seq_element handling */
+       int pmic_i2c_address;
 };
 
 int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
index 078b0448f30a001f90dbc62deb63eafebb78a1ec..7ffd5624b8e15f9734aa3f301e4a3136a8633887 100644 (file)
@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
        return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
 }
 
+static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
+                                                  u16 i2c_client_address,
+                                                  u32 reg_address,
+                                                  u32 value, u32 mask)
+{
+       u32 address;
+
+       if (i2c_client_address > 0xff || reg_address > 0xff) {
+               pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
+                       __func__, i2c_client_address, reg_address);
+               return -ERANGE;
+       }
+
+       address = (i2c_client_address << 8) | reg_address;
+
+       return regmap_update_bits(regmap, address, mask, value);
+}
+
 /*
  * The thermal table and ops are empty, we do not support the Thermal opregion
  * (DPTF) due to lacking documentation.
@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
        .get_power              = intel_cht_wc_pmic_get_power,
        .update_power           = intel_cht_wc_pmic_update_power,
+       .exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
        .power_table            = power_table,
        .power_table_count      = ARRAY_SIZE(power_table),
 };
index 2579675b7082b76e593a095771f50e9c8e07bca9..1b49cbb1e21e85377e7474b7b2f00231e5ed2dce 100644 (file)
@@ -240,6 +240,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
        .power_table_count = ARRAY_SIZE(power_table),
        .thermal_table = thermal_table,
        .thermal_table_count = ARRAY_SIZE(thermal_table),
+       .pmic_i2c_address = 0x34,
 };
 
 static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
index 73d8ccb977427a5b995081db91de5126165565db..2b6e0832d1cfd50f165ad775efccad6c96879d67 100644 (file)
@@ -395,7 +395,7 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 #ifdef CONFIG_DRM_I2C_ADV7533
 void adv7533_dsi_power_on(struct adv7511 *adv);
 void adv7533_dsi_power_off(struct adv7511 *adv);
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
 int adv7533_patch_registers(struct adv7511 *adv);
 int adv7533_patch_cec_registers(struct adv7511 *adv);
 int adv7533_attach_dsi(struct adv7511 *adv);
@@ -411,7 +411,7 @@ static inline void adv7533_dsi_power_off(struct adv7511 *adv)
 }
 
 static inline void adv7533_mode_set(struct adv7511 *adv,
-                                   struct drm_display_mode *mode)
+                                   const struct drm_display_mode *mode)
 {
 }
 
index 85c2d407a52e1a5476b3269d13655606d10478fd..d0e98caa2e2a1d95f38dce89213cae84cde6490a 100644 (file)
@@ -676,8 +676,8 @@ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
 }
 
 static void adv7511_mode_set(struct adv7511 *adv7511,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adj_mode)
+                            const struct drm_display_mode *mode,
+                            const struct drm_display_mode *adj_mode)
 {
        unsigned int low_refresh_rate;
        unsigned int hsync_polarity = 0;
@@ -839,8 +839,8 @@ static void adv7511_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adj_mode)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adj_mode)
 {
        struct adv7511 *adv = bridge_to_adv7511(bridge);
 
index 185b6d84216653003bdd0cd26df89bd6e598b3ad..5d5e7d9eded2f4bda06c8baf8ffd4070a5786207 100644 (file)
@@ -108,7 +108,7 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
        regmap_write(adv->regmap_cec, 0x27, 0x0b);
 }
 
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
 {
        struct mipi_dsi_device *dsi = adv->dsi;
        int lanes, ret;
index e11309e9bc4f595b1f93de0f9eb73e8a7cea5963..4cf7bc17ae149699c653936baebd41b1ee1fc5b4 100644 (file)
@@ -1082,8 +1082,8 @@ static void anx78xx_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adjusted_mode)
+                               const struct drm_display_mode *mode,
+                               const struct drm_display_mode *adjusted_mode)
 {
        struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
        struct hdmi_avi_infoframe frame;
index 753e96129ab7a63355d3d50e322780f563e76684..4d5b475858348f9aaf65e81493b241faca560991 100644 (file)
@@ -1361,8 +1361,8 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
-                                       struct drm_display_mode *orig_mode,
-                                       struct drm_display_mode *mode)
+                               const struct drm_display_mode *orig_mode,
+                               const struct drm_display_mode *mode)
 {
        struct analogix_dp_device *dp = bridge->driver_private;
        struct drm_display_info *display_info = &dp->connector.display_info;
index a9b4f45ae87c95e71ec35960857952d4164dcc67..a5d58f7035c157cda4aeb27d7ecb32c0b2c2d9d3 100644 (file)
@@ -232,8 +232,8 @@ static void sii902x_bridge_enable(struct drm_bridge *bridge)
 }
 
 static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adj)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adj)
 {
        struct sii902x *sii902x = bridge_to_sii902x(bridge);
        struct regmap *regmap = sii902x->regmap;
index 88b720b63126b61a17fa549528db653706355ba6..129f464cbeb19aaffe37d3be6831fd7801ef8f0f 100644 (file)
@@ -1999,8 +1999,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
 }
 
 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *orig_mode,
-                                   struct drm_display_mode *mode)
+                                   const struct drm_display_mode *orig_mode,
+                                   const struct drm_display_mode *mode)
 {
        struct dw_hdmi *hdmi = bridge->driver_private;
 
index 2f4b145b73af23d7db14845a111c5fc9d4f27736..23a5977a3b0a01eec66cf2e8269ac58e137c9642 100644 (file)
@@ -248,7 +248,7 @@ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
  * The controller should generate 2 frames before
  * preparing the peripheral.
  */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
+static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
 {
        int refresh, two_frames;
 
@@ -564,7 +564,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
-                                  struct drm_display_mode *mode)
+                                  const struct drm_display_mode *mode)
 {
        u32 val = 0, color = 0;
 
@@ -607,7 +607,7 @@ static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
-                                           struct drm_display_mode *mode)
+                                           const struct drm_display_mode *mode)
 {
        /*
         * TODO dw drv improvements
@@ -642,7 +642,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
 
 /* Get lane byte clock cycles. */
 static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
-                                          struct drm_display_mode *mode,
+                                          const struct drm_display_mode *mode,
                                           u32 hcomponent)
 {
        u32 frac, lbcc;
@@ -658,7 +658,7 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
-                                         struct drm_display_mode *mode)
+                                         const struct drm_display_mode *mode)
 {
        u32 htotal, hsa, hbp, lbcc;
 
@@ -681,7 +681,7 @@ static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
-                                              struct drm_display_mode *mode)
+                                       const struct drm_display_mode *mode)
 {
        u32 vactive, vsa, vfp, vbp;
 
@@ -818,7 +818,7 @@ static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
-                               struct drm_display_mode *adjusted_mode)
+                                const struct drm_display_mode *adjusted_mode)
 {
        const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
        void *priv_data = dsi->plat_data->priv_data;
@@ -861,8 +861,8 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
-                                       struct drm_display_mode *mode,
-                                       struct drm_display_mode *adjusted_mode)
+                                       const struct drm_display_mode *mode,
+                                       const struct drm_display_mode *adjusted_mode)
 {
        struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
index 8e28e738cb52dec6ee8ea7eda2d655fc7035be93..4df07f4cbff556a10931072aa3bcc88d7ae87973 100644 (file)
@@ -203,7 +203,7 @@ struct tc_data {
        /* display edid */
        struct edid             *edid;
        /* current mode */
-       struct drm_display_mode *mode;
+       const struct drm_display_mode   *mode;
 
        u32                     rev;
        u8                      assr;
@@ -648,7 +648,8 @@ err_dpcd_read:
        return ret;
 }
 
-static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+static int tc_set_video_mode(struct tc_data *tc,
+                            const struct drm_display_mode *mode)
 {
        int ret;
        int vid_sync_dly;
@@ -1113,8 +1114,8 @@ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connec
 }
 
 static void tc_bridge_mode_set(struct drm_bridge *bridge,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adj)
+                              const struct drm_display_mode *mode,
+                              const struct drm_display_mode *adj)
 {
        struct tc_data *tc = bridge_to_tc(bridge);
 
index ba7025041e4641ea16858497fde02dee9dd3c8f4..138b2711d389ebc0b49f2a2a7aab1e012e722347 100644 (file)
@@ -294,8 +294,8 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
  * Note: the bridge passed should be the one closest to the encoder
  */
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode)
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode)
 {
        if (!bridge)
                return;
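The drm_bridge.c hunk above (and the matching driver hunks) constify the mode pointers passed through drm_bridge_mode_set(). A minimal sketch of a bridge driver callback using the new prototype; the mydrv_* names are made up, only the const-qualified signature reflects the patch.

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>

static void mydrv_bridge_mode_set(struct drm_bridge *bridge,
				  const struct drm_display_mode *mode,
				  const struct drm_display_mode *adjusted_mode)
{
	/*
	 * The modes are read-only now; a driver that used to patch
	 * adjusted_mode in place has to work on a local copy instead.
	 */
	struct drm_display_mode adj = *adjusted_mode;

	drm_mode_debug_printmodeline(&adj);
}

static const struct drm_bridge_funcs mydrv_bridge_funcs = {
	.mode_set = mydrv_bridge_mode_set,
};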
index d4ecedccbb317771ecd6b0998cea80ca135d4208..54120b6319e7489e91c6c82565dd4946d7a64acd 100644 (file)
@@ -1277,6 +1277,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
        { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
        /* LG LP140WF6-SPM1 eDP panel */
        { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+       /* Apple panels need some additional handling to support PSR */
+       { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
 };
 
 #undef OUI
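The quirk table entry above only tags Apple OUI sinks with DP_DPCD_QUIRK_NO_PSR; acting on the quirk is left to source drivers. A hedged sketch, assuming the existing drm_dp_has_quirk() helper and a drm_dp_desc previously filled by drm_dp_read_desc(); the surrounding function is hypothetical.

#include <drm/drm_dp_helper.h>

static bool example_sink_allows_psr(const struct drm_dp_desc *sink_desc)
{
	/* Skip PSR on sinks flagged with the new no-PSR quirk. */
	return !drm_dp_has_quirk(sink_desc, DP_DPCD_QUIRK_NO_PSR);
}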
index 2fd299a58297edd559b2b5928a63833b3c2622df..dd02e8a323ef524488bf68b7ff01469f44184aa5 100644 (file)
@@ -246,8 +246,8 @@ already_disabled:
 }
 
 static void mic_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode)
+                        const struct drm_display_mode *mode,
+                        const struct drm_display_mode *adjusted_mode)
 {
        struct exynos_mic *mic = bridge->driver_private;
 
index 80e4ff33a37a0da2290e4bad6695d7eaeba9b090..ecdb8070ed357823c5f4383b14c212f27532265c 100644 (file)
@@ -845,7 +845,7 @@ static int tda998x_write_aif(struct tda998x_priv *priv,
 }
 
 static void
-tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
+tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
 {
        union hdmi_infoframe frame;
 
@@ -1339,8 +1339,8 @@ static void tda998x_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
-                                   struct drm_display_mode *mode,
-                                   struct drm_display_mode *adjusted_mode)
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adjusted_mode)
 {
        struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
        unsigned long tmds_clock;
index 19b5fe5016bf6617394da4d0e795d62e8d49a857..c34bee16730d70027bab8a231454d2ebbacaff80 100644 (file)
@@ -40,7 +40,7 @@ i915-y := i915_drv.o \
          i915_mm.o \
          i915_params.o \
          i915_pci.o \
-          i915_suspend.o \
+         i915_suspend.o \
          i915_syncmap.o \
          i915_sw_fence.o \
          i915_sysfs.o \
index 5e6a3013da49645ebf6ea0b1487fc383fc43d3b1..16e0345b711fb3cc782125aa418e804d2e09ab50 100644 (file)
@@ -24,7 +24,6 @@
 #define _INTEL_DVO_H
 
 #include <linux/i2c.h>
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
 
index c628be05fbfe907a1bce89fd4727df79906fe63f..e1c860f80eb0593dbe1948f897f7c55cfe0d6c99 100644 (file)
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
                                                   high_avail / vgpu_types[i].high_mm);
 
-               if (IS_GEN8(gvt->dev_priv))
+               if (IS_GEN(gvt->dev_priv, 8))
                        sprintf(gvt->types[i].name, "GVTg_V4_%s",
                                                vgpu_types[i].name);
-               else if (IS_GEN9(gvt->dev_priv))
+               else if (IS_GEN(gvt->dev_priv, 9))
                        sprintf(gvt->types[i].name, "GVTg_V5_%s",
                                                vgpu_types[i].name);
 
index 95478db9998b51a410b927d654990967c74c5fdc..33e8eed64423af5f00fac38c6ca88dccd7b7911e 100644 (file)
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN7(engine->i915))
+       if (!IS_GEN(engine->i915, 7))
                return;
 
        switch (engine->id) {
index 9bad6a32adaef35ba0ccd56ee114687e8ae812e4..d460ef522d9cb6e037bf82031902d1f5db8a2b6a 100644 (file)
@@ -48,7 +48,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
        intel_device_info_dump_flags(info, &p);
-       intel_device_info_dump_runtime(info, &p);
+       intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);
 
        kernel_param_lock(THIS_MODULE);
@@ -297,11 +297,12 @@ out:
 }
 
 struct file_stats {
-       struct drm_i915_file_private *file_priv;
+       struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
+       u64 closed;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -326,9 +327,7 @@ static int per_file_stats(int id, void *ptr, void *data)
                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
-                       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
-                       if (ppgtt->vm.file != stats->file_priv)
+                       if (vma->vm != stats->vm)
                                continue;
                }
 
@@ -336,6 +335,9 @@ static int per_file_stats(int id, void *ptr, void *data)
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;
+
+               if (i915_vma_is_closed(vma))
+                       stats->closed += vma->node.size;
        }
 
        return 0;
@@ -343,7 +345,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
        if (stats.count) \
-               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
@@ -351,20 +353,19 @@ static int per_file_stats(int id, void *ptr, void *data)
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
-                          stats.unbound); \
+                          stats.unbound, \
+                          stats.closed); \
 } while (0)
 
 static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj;
-       struct file_stats stats;
        struct intel_engine_cs *engine;
+       struct file_stats stats = {};
        enum intel_engine_id id;
        int j;
 
-       memset(&stats, 0, sizeof(stats));
-
        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
@@ -377,44 +378,47 @@ static void print_batch_pool_stats(struct seq_file *m,
        print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+                               struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx = ptr;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct file_stats kstats = {};
+       struct i915_gem_context *ctx;
 
-       for_each_engine(engine, ctx->i915, id) {
-               struct intel_context *ce = to_intel_context(ctx, engine);
+       list_for_each_entry(ctx, &i915->contexts.list, link) {
+               struct intel_engine_cs *engine;
+               enum intel_engine_id id;
 
-               if (ce->state)
-                       per_file_stats(0, ce->state->obj, data);
-               if (ce->ring)
-                       per_file_stats(0, ce->ring->vma->obj, data);
-       }
+               for_each_engine(engine, i915, id) {
+                       struct intel_context *ce = to_intel_context(ctx, engine);
 
-       return 0;
-}
+                       if (ce->state)
+                               per_file_stats(0, ce->state->obj, &kstats);
+                       if (ce->ring)
+                               per_file_stats(0, ce->ring->vma->obj, &kstats);
+               }
 
-static void print_context_stats(struct seq_file *m,
-                               struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct file_stats stats;
-       struct drm_file *file;
+               if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+                       struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+                       struct drm_file *file = ctx->file_priv->file;
+                       struct task_struct *task;
+                       char name[80];
 
-       memset(&stats, 0, sizeof(stats));
+                       spin_lock(&file->table_lock);
+                       idr_for_each(&file->object_idr, per_file_stats, &stats);
+                       spin_unlock(&file->table_lock);
 
-       mutex_lock(&dev->struct_mutex);
-       if (dev_priv->kernel_context)
-               per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+                       rcu_read_lock();
+                       task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+                       snprintf(name, sizeof(name), "%s/%d",
+                                task ? task->comm : "<unknown>",
+                                ctx->user_handle);
+                       rcu_read_unlock();
 
-       list_for_each_entry(file, &dev->filelist, lhead) {
-               struct drm_i915_file_private *fpriv = file->driver_priv;
-               idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+                       print_file_stats(m, name, stats);
+               }
        }
-       mutex_unlock(&dev->struct_mutex);
 
-       print_file_stats(m, "[k]contexts", stats);
+       print_file_stats(m, "[k]contexts", kstats);
 }
 
 static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +430,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
-       struct drm_file *file;
        char buf[80];
        int ret;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);
@@ -514,43 +513,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                        buf, sizeof(buf)));
 
        seq_putc(m, '\n');
-       print_batch_pool_stats(m, dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-
-       mutex_lock(&dev->filelist_mutex);
-       print_context_stats(m, dev_priv);
-       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-               struct file_stats stats;
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-               struct i915_request *request;
-               struct task_struct *task;
-
-               mutex_lock(&dev->struct_mutex);
 
-               memset(&stats, 0, sizeof(stats));
-               stats.file_priv = file->driver_priv;
-               spin_lock(&file->table_lock);
-               idr_for_each(&file->object_idr, per_file_stats, &stats);
-               spin_unlock(&file->table_lock);
-               /*
-                * Although we have a valid reference on file->pid, that does
-                * not guarantee that the task_struct who called get_pid() is
-                * still alive (e.g. get_pid(current) => fork() => exit()).
-                * Therefore, we need to protect this ->comm access using RCU.
-                */
-               request = list_first_entry_or_null(&file_priv->mm.request_list,
-                                                  struct i915_request,
-                                                  client_link);
-               rcu_read_lock();
-               task = pid_task(request && request->gem_context->pid ?
-                               request->gem_context->pid : file->pid,
-                               PIDTYPE_PID);
-               print_file_stats(m, task ? task->comm : "<unknown>", stats);
-               rcu_read_unlock();
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
-               mutex_unlock(&dev->struct_mutex);
-       }
-       mutex_unlock(&dev->filelist_mutex);
+       print_batch_pool_stats(m, dev_priv);
+       print_context_stats(m, dev_priv);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -984,8 +954,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
        intel_runtime_pm_get(i915);
        gpu = i915_capture_gpu_state(i915);
        intel_runtime_pm_put(i915);
-       if (!gpu)
-               return -ENOMEM;
+       if (IS_ERR(gpu))
+               return PTR_ERR(gpu);
 
        file->private_data = gpu;
        return 0;
@@ -1018,7 +988,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-       file->private_data = i915_first_error_state(inode->i_private);
+       struct i915_gpu_state *error;
+
+       error = i915_first_error_state(inode->i_private);
+       if (IS_ERR(error))
+               return PTR_ERR(error);
+
+       file->private_data  = error;
        return 0;
 }
 
@@ -1032,30 +1008,6 @@ static const struct file_operations i915_error_state_fops = {
 };
 #endif
 
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-       struct drm_i915_private *dev_priv = data;
-       struct drm_device *dev = &dev_priv->drm;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       intel_runtime_pm_get(dev_priv);
-       ret = i915_gem_set_global_seqno(dev, val);
-       intel_runtime_pm_put(dev_priv);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-                       NULL, i915_next_seqno_set,
-                       "0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1064,7 +1016,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1785,7 +1737,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        unsigned long temp, chipset, gfx;
        int ret;
 
-       if (!IS_GEN5(dev_priv))
+       if (!IS_GEN(dev_priv, 5))
                return -ENODEV;
 
        intel_runtime_pm_get(dev_priv);
@@ -2034,7 +1986,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-       if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+       if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2070,124 +2022,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static int per_file_ctx(int id, void *ptr, void *data)
-{
-       struct i915_gem_context *ctx = ptr;
-       struct seq_file *m = data;
-       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
-       if (!ppgtt) {
-               seq_printf(m, "  no ppgtt for context %d\n",
-                          ctx->user_handle);
-               return 0;
-       }
-
-       if (i915_gem_context_is_default(ctx))
-               seq_puts(m, "  default context:\n");
-       else
-               seq_printf(m, "  context %d:\n", ctx->user_handle);
-       ppgtt->debug_dump(ppgtt, m);
-
-       return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
-                           struct drm_i915_private *dev_priv)
-{
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int i;
-
-       if (!ppgtt)
-               return;
-
-       for_each_engine(engine, dev_priv, id) {
-               seq_printf(m, "%s\n", engine->name);
-               for (i = 0; i < 4; i++) {
-                       u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
-                       pdp <<= 32;
-                       pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
-                       seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
-               }
-       }
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
-                           struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (IS_GEN6(dev_priv))
-               seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
-       for_each_engine(engine, dev_priv, id) {
-               seq_printf(m, "%s\n", engine->name);
-               if (IS_GEN7(dev_priv))
-                       seq_printf(m, "GFX_MODE: 0x%08x\n",
-                                  I915_READ(RING_MODE_GEN7(engine)));
-               seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_BASE(engine)));
-               seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_BASE_READ(engine)));
-               seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
-                          I915_READ(RING_PP_DIR_DCLV(engine)));
-       }
-       if (dev_priv->mm.aliasing_ppgtt) {
-               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-               seq_puts(m, "aliasing PPGTT:\n");
-               seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
-               ppgtt->debug_dump(ppgtt, m);
-       }
-
-       seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_file *file;
-       int ret;
-
-       mutex_lock(&dev->filelist_mutex);
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               goto out_unlock;
-
-       intel_runtime_pm_get(dev_priv);
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               gen8_ppgtt_info(m, dev_priv);
-       else if (INTEL_GEN(dev_priv) >= 6)
-               gen6_ppgtt_info(m, dev_priv);
-
-       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-               struct task_struct *task;
-
-               task = get_pid_task(file->pid, PIDTYPE_PID);
-               if (!task) {
-                       ret = -ESRCH;
-                       goto out_rpm;
-               }
-               seq_printf(m, "\nproc: %s\n", task->comm);
-               put_task_struct(task);
-               idr_for_each(&file_priv->context_idr, per_file_ctx,
-                            (void *)(unsigned long)m);
-       }
-
-out_rpm:
-       intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-out_unlock:
-       mutex_unlock(&dev->filelist_mutex);
-       return ret;
-}
-
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
@@ -3120,14 +2954,13 @@ static const char *plane_type(enum drm_plane_type type)
        return "unknown";
 }
 
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 {
-       static char buf[48];
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
-       snprintf(buf, sizeof(buf),
+       snprintf(buf, bufsize,
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3136,8 +2969,6 @@ static const char *plane_rotation(unsigned int rotation)
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
-
-       return buf;
 }
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
@@ -3150,6 +2981,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
                struct drm_format_name_buf format_name;
+               char rot_str[48];
 
                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
@@ -3165,6 +2997,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                        sprintf(format_name.str, "N/A");
                }
 
+               plane_rotation(rot_str, sizeof(rot_str), state->rotation);
+
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
@@ -3179,7 +3013,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
                           format_name.str,
-                          plane_rotation(state->rotation));
+                          rot_str);
        }
 }
 
@@ -3286,7 +3120,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
        seq_printf(m, "CS timestamp frequency: %u kHz\n",
-                  dev_priv->info.cs_timestamp_frequency_khz);
+                  RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 
        p = drm_seq_file_printer(m);
        for_each_engine(engine, dev_priv, id)
@@ -3302,7 +3136,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_printer p = drm_seq_file_printer(m);
 
-       intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+       intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
        return 0;
 }
@@ -4206,9 +4040,6 @@ i915_drop_caches_set(void *data, u64 val)
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);
 
-               if (ret == 0 && val & DROP_RESET_SEQNO)
-                       ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);
 
@@ -4261,7 +4092,7 @@ i915_cache_sharing_get(void *data, u64 *val)
        struct drm_i915_private *dev_priv = data;
        u32 snpcr;
 
-       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+       if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;
 
        intel_runtime_pm_get(dev_priv);
@@ -4281,7 +4112,7 @@ i915_cache_sharing_set(void *data, u64 val)
        struct drm_i915_private *dev_priv = data;
        u32 snpcr;
 
-       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+       if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;
 
        if (val > 3)
@@ -4341,7 +4172,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
                                     struct sseu_dev_info *sseu)
 {
 #define SS_MAX 6
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
@@ -4397,7 +4228,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
                                    struct sseu_dev_info *sseu)
 {
 #define SS_MAX 3
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
@@ -4425,7 +4256,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 
                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 
                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;
@@ -4459,10 +4290,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 
        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
-                               INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+                       RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);
@@ -4470,7 +4301,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
-                               INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
 
                        sseu->eu_total -= hweight8(subslice_7eu);
                }
@@ -4523,14 +4354,14 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
                return -ENODEV;
 
        seq_puts(m, "SSEU Device Info\n");
-       i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+       i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
 
        seq_puts(m, "SSEU Device Status\n");
        memset(&sseu, 0, sizeof(sseu));
-       sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-       sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+       sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+       sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
-               INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
+               RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
 
        intel_runtime_pm_get(dev_priv);
 
@@ -4538,7 +4369,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
                cherryview_sseu_device_status(dev_priv, &sseu);
        } else if (IS_BROADWELL(dev_priv)) {
                broadwell_sseu_device_status(dev_priv, &sseu);
-       } else if (IS_GEN9(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 9)) {
                gen9_sseu_device_status(dev_priv, &sseu);
        } else if (INTEL_GEN(dev_priv) >= 10) {
                gen10_sseu_device_status(dev_priv, &sseu);
@@ -4899,7 +4730,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_context_status", i915_context_status, 0},
        {"i915_forcewake_domains", i915_forcewake_domains, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
-       {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4934,7 +4764,6 @@ static const struct i915_debugfs_files {
        {"i915_gpu_info", &i915_gpu_info_fops},
 #endif
        {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-       {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
        {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
        {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5081,6 +4910,106 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct drm_device *dev = connector->dev;
+       struct drm_crtc *crtc;
+       struct intel_dp *intel_dp;
+       struct drm_modeset_acquire_ctx ctx;
+       struct intel_crtc_state *crtc_state = NULL;
+       int ret = 0;
+       bool try_again = false;
+
+       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+       do {
+               try_again = false;
+               ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+                                      &ctx);
+               if (ret) {
+                       ret = -EINTR;
+                       break;
+               }
+               crtc = connector->state->crtc;
+               if (connector->status != connector_status_connected || !crtc) {
+                       ret = -ENODEV;
+                       break;
+               }
+               ret = drm_modeset_lock(&crtc->mutex, &ctx);
+               if (ret == -EDEADLK) {
+                       ret = drm_modeset_backoff(&ctx);
+                       if (!ret) {
+                               try_again = true;
+                               continue;
+                       }
+                       break;
+               } else if (ret) {
+                       break;
+               }
+               intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+               crtc_state = to_intel_crtc_state(crtc->state);
+               seq_printf(m, "DSC_Enabled: %s\n",
+                          yesno(crtc_state->dsc_params.compression_enable));
+               if (intel_dp->dsc_dpcd)
+                       seq_printf(m, "DSC_Sink_Support: %s\n",
+                                  yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+               if (!intel_dp_is_edp(intel_dp))
+                       seq_printf(m, "FEC_Sink_Support: %s\n",
+                                  yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+       } while (try_again);
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+                                         const char __user *ubuf,
+                                         size_t len, loff_t *offp)
+{
+       bool dsc_enable = false;
+       int ret;
+       struct drm_connector *connector =
+               ((struct seq_file *)file->private_data)->private;
+       struct intel_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+       if (len == 0)
+               return 0;
+
+       DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+                        len);
+
+       ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+       if (ret < 0)
+               return ret;
+
+       DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+                        (dsc_enable) ? "true" : "false");
+       intel_dp->force_dsc_en = dsc_enable;
+
+       *offp += len;
+       return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+                                    struct file *file)
+{
+       return single_open(file, i915_dsc_fec_support_show,
+                          inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_dsc_fec_support_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = i915_dsc_fec_support_write
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5093,6 +5022,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 int i915_debugfs_connector_add(struct drm_connector *connector)
 {
        struct dentry *root = connector->debugfs_entry;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
        /* The connector must have been registered beforehand. */
        if (!root)
@@ -5117,5 +5047,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
                                    connector, &i915_hdcp_sink_capability_fops);
        }
 
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+            connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+               debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+                                   connector, &i915_dsc_fec_support_fops);
+
        return 0;
 }
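
The node registered above is exposed only for DP and eDP connectors on Gen10+ and accepts a boolean that forces DSC on the next modeset. A minimal userspace sketch, assuming a debugfs path of the usual /sys/kernel/debug/dri/<minor>/<connector> form (the exact card minor and connector name vary per system and are not taken from this series):

/* Minimal sketch, not part of this series: poke the new per-connector
 * debugfs node from userspace. The path below is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support";
	char buf[256];
	ssize_t n;
	int fd;

	/* Force DSC for the next modeset; parsed by kstrtobool_from_user(). */
	fd = open(node, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)
		perror("force DSC");
	close(fd);

	/* Read back DSC_Enabled / DSC_Sink_Support / FEC_Sink_Support. */
	fd = open(node, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
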
index b310a897a4adab444349252503fba2e92134814f..75652dc1e24cde6ce500e45cf0e50793917c4b78 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/vt.h>
 #include <acpi/video.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/i915_drm.h>
@@ -132,15 +131,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
        switch (id) {
        case INTEL_PCH_IBX_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-               WARN_ON(!IS_GEN5(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 5));
                return PCH_IBX;
        case INTEL_PCH_CPT_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-               WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
                return PCH_CPT;
        case INTEL_PCH_PPT_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-               WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+               WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
                /* PantherPoint is CPT compatible */
                return PCH_CPT;
        case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +216,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
         * make an educated guess as to which PCH is really there.
         */
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-       else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
        else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
                id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +348,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
-               value = HAS_LEGACY_SEMAPHORES(dev_priv);
+               value = 0;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
@@ -358,12 +357,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = i915_cmd_parser_get_version(dev_priv);
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
-               value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+               value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev_priv)->sseu.eu_total;
+               value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
@@ -380,7 +379,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
-               value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+               value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
                value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +429,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = intel_engines_has_context_isolation(dev_priv);
                break;
        case I915_PARAM_SLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+               value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_SUBSLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+               value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-               value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+               value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
                break;
        case I915_PARAM_MMAP_GTT_COHERENT:
                value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
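
These getparam conversions are transparent to userspace: the ioctl and parameter numbers are untouched, only the kernel-side lookup moves from the static device info to RUNTIME_INFO(). For reference, a hedged sketch of the long-standing query pattern (the header include path depends on how the libdrm headers are installed):

/* Sketch: querying SSEU totals through the unchanged GETPARAM UAPI.
 * Assumes an already-open i915 DRM fd; error handling trimmed. */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int i915_getparam(int fd, int param, int *value)
{
	struct drm_i915_getparam gp = {
		.param = param,
		.value = value,
	};

	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}

/* e.g.:
 *	i915_getparam(fd, I915_PARAM_EU_TOTAL, &eu_total);
 *	i915_getparam(fd, I915_PARAM_SUBSLICE_TOTAL, &subslice_total);
 */
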
@@ -966,7 +965,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
        int mmio_bar;
        int mmio_size;
 
-       mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+       mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1340,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
        /* Need to calculate bandwidth only for Gen9 */
        if (IS_BROXTON(dev_priv))
                ret = bxt_get_dram_info(dev_priv);
-       else if (IS_GEN9(dev_priv))
+       else if (IS_GEN(dev_priv, 9))
                ret = skl_get_dram_info(dev_priv);
        else
                ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1373,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        if (i915_inject_load_failure())
                return -ENODEV;
 
-       intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+       intel_device_info_runtime_init(dev_priv);
 
        if (HAS_PPGTT(dev_priv)) {
                if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1435,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        pci_set_master(pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev_priv)) {
+       if (IS_GEN(dev_priv, 2)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1573,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
                acpi_video_register();
        }
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                intel_gpu_ips_init(dev_priv);
 
        intel_audio_init(dev_priv);
@@ -1636,8 +1635,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
        if (drm_debug & DRM_UT_DRIVER) {
                struct drm_printer p = drm_debug_printer("i915 device info:");
 
-               intel_device_info_dump(&dev_priv->info, &p);
-               intel_device_info_dump_runtime(&dev_priv->info, &p);
+               drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+                          INTEL_DEVID(dev_priv),
+                          INTEL_REVID(dev_priv),
+                          intel_platform_name(INTEL_INFO(dev_priv)->platform),
+                          INTEL_GEN(dev_priv));
+
+               intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+               intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        }
 
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
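
The welcome message now prints a one-line identification itself and delegates the feature and runtime dumps to the two split helpers. Because both take a drm_printer, the same dumps can be routed to other sinks; a sketch of wiring them to a seq_file via drm_seq_file_printer() from <drm/drm_print.h> (the callback below is illustrative, not part of this series):

/* Sketch: reuse the split dump helpers behind a seq_file-backed printer. */
static int i915_info_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);

	return 0;
}
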
@@ -1674,7 +1679,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(i915);
        memcpy(device_info, match_info, sizeof(*device_info));
-       device_info->device_id = pdev->device;
+       RUNTIME_INFO(i915)->device_id = pdev->device;
 
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
                     BITS_PER_TYPE(device_info->platform_mask));
@@ -2174,7 +2179,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_power_domains_resume(dev_priv);
 
-       intel_engines_sanitize(dev_priv);
+       intel_engines_sanitize(dev_priv, true);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2226,6 +2231,7 @@ void i915_reset(struct drm_i915_private *i915,
 
        might_sleep();
        lockdep_assert_held(&i915->drm.struct_mutex);
+       assert_rpm_wakelock_held(i915);
        GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
        if (!test_bit(I915_RESET_HANDOFF, &error->flags))
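
i915_reset() now documents a second precondition: besides struct_mutex, the caller must already hold a runtime-PM wakeref, and assert_rpm_wakelock_held() turns a violation into a loud warning instead of a silent register access on a suspended device. The same pattern applies to any helper that touches hardware during reset, sketched here with a hypothetical function:

/* Sketch: stating power/locking preconditions up front, as i915_reset()
 * now does. example_reset_prepare() is hypothetical. */
static void example_reset_prepare(struct drm_i915_private *i915)
{
	assert_rpm_wakelock_held(i915);
	lockdep_assert_held(&i915->drm.struct_mutex);

	/* safe to touch registers that require the device to be awake */
}
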
index b1c31967194b92cc47b8a3cedafea021d82fd073..5df26ccda8a4fb5fe6e7f5b2273110d3f939e8ff 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 
-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
@@ -54,6 +53,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -90,8 +90,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20181204"
-#define DRIVER_TIMESTAMP       1543944377
+#define DRIVER_DATE            "20190110"
+#define DRIVER_TIMESTAMP       1547162337
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -281,16 +281,14 @@ struct drm_i915_display_funcs {
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
        int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-       int (*compute_intermediate_wm)(struct drm_device *dev,
-                                      struct intel_crtc *intel_crtc,
-                                      struct intel_crtc_state *newstate);
+       int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
        void (*initial_watermarks)(struct intel_atomic_state *state,
                                   struct intel_crtc_state *cstate);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
                                         struct intel_crtc_state *cstate);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
                                    struct intel_crtc_state *cstate);
-       int (*compute_global_watermarks)(struct drm_atomic_state *state);
+       int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +320,8 @@ struct drm_i915_display_funcs {
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 
-       void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-       void (*load_luts)(struct drm_crtc_state *crtc_state);
+       void (*load_csc_matrix)(struct intel_crtc_state *crtc_state);
+       void (*load_luts)(struct intel_crtc_state *crtc_state);
 };
 
 #define CSR_VERSION(major, minor)      ((major) << 16 | (minor))
@@ -509,6 +507,7 @@ struct i915_psr {
        ktime_t last_exit;
        bool sink_not_reliable;
        bool irq_aux_error;
+       u16 su_x_granularity;
 };
 
 enum intel_pch {
@@ -936,6 +935,8 @@ struct ddi_vbt_port_info {
        uint8_t supports_hdmi:1;
        uint8_t supports_dp:1;
        uint8_t supports_edp:1;
+       uint8_t supports_typec_usb:1;
+       uint8_t supports_tbt:1;
 
        uint8_t alternate_aux_channel;
        uint8_t alternate_ddc_pin;
@@ -1430,7 +1431,8 @@ struct drm_i915_private {
        struct kmem_cache *dependencies;
        struct kmem_cache *priorities;
 
-       const struct intel_device_info info;
+       const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+       struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
 
        /**
@@ -1947,7 +1949,6 @@ struct drm_i915_private {
                struct list_head active_rings;
                struct list_head closed_vma;
                u32 active_requests;
-               u32 request_serial;
 
                /**
                 * Is the GPU currently considered idle, or busy executing
@@ -2191,17 +2192,12 @@ static inline unsigned int i915_sg_segment_size(void)
        return size;
 }
 
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
-       return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv)   intel_info((dev_priv))
+#define INTEL_INFO(dev_priv)   (&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)  (&(dev_priv)->caps)
 
-#define INTEL_GEN(dev_priv)    ((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)  ((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv)    (INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv)  (RUNTIME_INFO(dev_priv)->device_id)
 
 #define REVID_FOREVER          0xff
 #define INTEL_REVID(dev_priv)  ((dev_priv)->drm.pdev->revision)
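
The old dev_priv->info field is split in two: __info stays a write-once description of the platform, while __runtime collects everything probed or fixed up at load time (device id, SSEU topology, CS timestamp frequency, and so on). All accesses now funnel through the two macros, which is what lets the rest of this series mechanically convert the (dev_priv)->info.x users. A short sketch of the intended split:

/* Sketch of the intended usage after the split; the function is
 * illustrative, not taken from this series. */
static void example_probe_fixups(struct drm_i915_private *dev_priv)
{
	/* mutable, discovered while probing the device */
	RUNTIME_INFO(dev_priv)->device_id = dev_priv->drm.pdev->device;

	/* immutable platform description, read-only after init */
	if (INTEL_INFO(dev_priv)->has_llc)
		DRM_DEBUG_DRIVER("LLC present\n");
}
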
@@ -2212,8 +2208,12 @@ intel_info(const struct drm_i915_private *dev_priv)
        GENMASK((e) - 1, (s) - 1))
 
 /* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
-       (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+       (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+       (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+        INTEL_INFO(dev_priv)->gen == (n))
 
 /*
  * Return true if revision is in range [since,until] inclusive.
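
The range form of the old IS_GEN() moves to IS_GEN_RANGE(), and the name IS_GEN(dev_priv, n) is reused for exact-generation tests, replacing the IS_GENn() family removed further down. The BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) term forces n to be a compile-time constant, so a stray variable cannot silently select the wrong test. The conversion pattern, with hypothetical helpers:

/* Sketch of the conversion pattern used throughout this series; the two
 * helpers called here are hypothetical. */
static void example_apply_workarounds(struct drm_i915_private *dev_priv)
{
	/* exact generation: IS_GEN(dev_priv, n) replaces the old IS_GENn() */
	if (IS_GEN(dev_priv, 9))
		apply_gen9_quirks(dev_priv);

	/* inclusive range: IS_GEN_RANGE() replaces the old IS_GEN(p, s, e) */
	if (IS_GEN_RANGE(dev_priv, 9, 11))
		apply_gen9_to_gen11_workarounds(dev_priv);
}
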
@@ -2223,7 +2223,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
        (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
 
 #define IS_I830(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2245,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_IRONLAKE_M(dev_priv)        (INTEL_DEVID(dev_priv) == 0x0046)
 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)   (IS_IVYBRIDGE(dev_priv) && \
-                                (dev_priv)->info.gt == 1)
+                                INTEL_INFO(dev_priv)->gt == 1)
 #define IS_VALLEYVIEW(dev_priv)        IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
 #define IS_CHERRYVIEW(dev_priv)        IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
 #define IS_HASWELL(dev_priv)   IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2257,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_COFFEELAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv)    ((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv)    (INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv)   (IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2268,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BDW_ULX(dev_priv)   (IS_BROADWELL(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev_priv)   (IS_BROADWELL(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_ULT(dev_priv)   (IS_HASWELL(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
 #define IS_HSW_GT3(dev_priv)   (IS_HASWELL(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv)   (IS_HASWELL(dev_priv) && \
+                                INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x0A0E || \
                                 INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,21 +2297,21 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
                                 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_SKL_GT4(dev_priv)   (IS_SKYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 4)
+                                INTEL_INFO(dev_priv)->gt == 4)
 #define IS_KBL_GT2(dev_priv)   (IS_KABYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)   (IS_KABYLAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CFL_ULT(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
                                 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
 #define IS_CFL_GT2(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 2)
+                                INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)   (IS_COFFEELAKE(dev_priv) && \
-                                (dev_priv)->info.gt == 3)
+                                INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
                                        (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
 
@@ -2366,26 +2368,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_ICL_REVID(p, since, until) \
        (IS_ICELAKE(p) && IS_REVID(p, since, until))
 
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv)     (!!((dev_priv)->info.gen_mask & BIT(10)))
-
 #define IS_LP(dev_priv)        (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)   (IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)   (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv)   (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)   (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
 #define ENGINE_MASK(id)        BIT(id)
 #define RENDER_RING    ENGINE_MASK(RCS)
@@ -2399,29 +2384,27 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define ALL_ENGINES    (~0)
 
 #define HAS_ENGINE(dev_priv, id) \
-       (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+       (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
 
 #define HAS_BSD(dev_priv)      HAS_ENGINE(dev_priv, VCS)
 #define HAS_BSD2(dev_priv)     HAS_ENGINE(dev_priv, VCS2)
 #define HAS_BLT(dev_priv)      HAS_ENGINE(dev_priv, BCS)
 #define HAS_VEBOX(dev_priv)    HAS_ENGINE(dev_priv, VECS)
 
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv)      ((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv)    ((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv)      (INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv)    (INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)    (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev_priv)       ((IS_HASWELL(dev_priv) || \
                                 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 
-#define HWS_NEEDS_PHYSICAL(dev_priv)   ((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv)   (INTEL_INFO(dev_priv)->hws_needs_physical)
 
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_contexts)
+               (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_elsq)
+               (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-               ((dev_priv)->info.has_logical_ring_preemption)
+               (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
@@ -2435,12 +2418,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
        GEM_BUG_ON((sizes) == 0); \
-       ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+       ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)           ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv)           (INTEL_INFO(dev_priv)->display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-               ((dev_priv)->info.display.overlay_needs_physical)
+               (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)    (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2458,42 +2441,42 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
                                         !(IS_I915G(dev_priv) || \
                                         IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)          ((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)     ((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv)          (INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)     (INTEL_INFO(dev_priv)->display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv)   (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)      ((dev_priv)->info.display.has_fbc)
+#define HAS_FBC(dev_priv)      (INTEL_INFO(dev_priv)->display.has_fbc)
 #define HAS_CUR_FBC(dev_priv)  (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)      (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)   ((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv)   (INTEL_INFO(dev_priv)->display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)               ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)               ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv)               (INTEL_INFO(dev_priv)->display.has_psr)
 
-#define HAS_RC6(dev_priv)               ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv)              ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv)              (INTEL_INFO(dev_priv)->has_rc6p)
 #define HAS_RC6pp(dev_priv)             (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)      ((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv)      (INTEL_INFO(dev_priv)->display.has_csr)
 
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)               ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ipc)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev_priv)      ((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv)   ((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv)      (INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv)   (INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)        (HAS_GUC(dev_priv))
 
@@ -2502,11 +2485,11 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_HUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 
 /* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv)             intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv)             intel_uc_is_using_huc()
+#define USES_GUC(dev_priv)             intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv)             intel_uc_is_using_huc(dev_priv)
 
-#define HAS_POOLED_EU(dev_priv)        ((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv)        (INTEL_INFO(dev_priv)->has_pooled_eu)
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -2546,12 +2529,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch_display)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
                                 2 : HAS_L3_DPF(dev_priv))
 
@@ -2916,9 +2899,9 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        __i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
-       I915_MM_SHRINKER
+       I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque context */
 };
 
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
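
The widened comment reflects that the subclass now covers struct_mutex as well as obj->mm.lock: when the shrinker runs from a direct-reclaim-like context it may re-enter a lock class an outer caller already holds, and the dedicated subclass keeps lockdep from reporting that as a self-deadlock. A sketch of how such a subclass is consumed:

/* Sketch: taking obj->mm.lock with the shrinker subclass, as the shrinker
 * paths do, so lockdep tracks it separately from the normal acquisition.
 * Illustrative only; not a function from this series. */
static void example_shrink_object(struct drm_i915_gem_object *obj)
{
	mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER);

	/* ... drop the backing pages ... */

	mutex_unlock(&obj->mm.lock);
}
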
@@ -3204,7 +3187,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+                                   struct mutex *mutex);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3297,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 static inline struct intel_device_info *
 mkwrite_device_info(struct drm_i915_private *dev_priv)
 {
-       return (struct intel_device_info *)&dev_priv->info;
+       return (struct intel_device_info *)INTEL_INFO(dev_priv);
 }
 
 /* modesetting */
@@ -3599,90 +3583,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
        }
 }
 
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 seqno;
-
-       /* Note that the engine may have wrapped around the seqno, and
-        * so our request->global_seqno will be ahead of the hardware,
-        * even though it completed the request before wrapping. We catch
-        * this by kicking all the waiters before resetting the seqno
-        * in hardware, and also signal the fence.
-        */
-       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-               return true;
-
-       /* The request was dequeued before we were awoken. We check after
-        * inspecting the hw to confirm that this was the same request
-        * that generated the HWS update. The memory barriers within
-        * the request execution are sufficient to ensure that a check
-        * after reading the value from hw matches this request.
-        */
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno)
-               return false;
-
-       /* Before we do the heavier coherent read of the seqno,
-        * check the value (hopefully) in the CPU cacheline.
-        */
-       if (__i915_request_completed(rq, seqno))
-               return true;
-
-       /* Ensure our read of the seqno is coherent so that we
-        * do not "miss an interrupt" (i.e. if this is the last
-        * request and the seqno write from the GPU is not visible
-        * by the time the interrupt fires, we will see that the
-        * request is incomplete and go back to sleep awaiting
-        * another interrupt that will never come.)
-        *
-        * Strictly, we only need to do this once after an interrupt,
-        * but it is easier and safer to do it every time the waiter
-        * is woken.
-        */
-       if (engine->irq_seqno_barrier &&
-           test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-               /* The ordering of irq_posted versus applying the barrier
-                * is crucial. The clearing of the current irq_posted must
-                * be visible before we perform the barrier operation,
-                * such that if a subsequent interrupt arrives, irq_posted
-                * is reasserted and our task rewoken (which causes us to
-                * do another __i915_request_irq_complete() immediately
-                * and reapply the barrier). Conversely, if the clear
-                * occurs after the barrier, then an interrupt that arrived
-                * whilst we waited on the barrier would not trigger a
-                * barrier on the next pass, and the read may not see the
-                * seqno update.
-                */
-               engine->irq_seqno_barrier(engine);
-
-               /* If we consume the irq, but we are no longer the bottom-half,
-                * the real bottom-half may not have serialised their own
-                * seqno check with the irq-barrier (i.e. may have inspected
-                * the seqno before we believe it coherent since they see
-                * irq_posted == false but we are still running).
-                */
-               spin_lock_irq(&b->irq_lock);
-               if (b->irq_wait && b->irq_wait->tsk != current)
-                       /* Note that if the bottom-half is changed as we
-                        * are sending the wake-up, the new bottom-half will
-                        * be woken by whomever made the change. We only have
-                        * to worry about when we steal the irq-posted for
-                        * ourself.
-                        */
-                       wake_up_process(b->irq_wait->tsk);
-               spin_unlock_irq(&b->irq_lock);
-
-               if (__i915_request_completed(rq, seqno))
-                       return true;
-       }
-
-       return false;
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
 
index 7399ac7a562924890b47c5eac4d52371bbcd8e2c..ea85da39366299a30d37525d7f54508af43b8a18 100644 (file)
@@ -25,7 +25,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -859,58 +858,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
        obj->write_domain = 0;
 }
 
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
-                       const char *gpu_vaddr, int gpu_offset,
-                       int length)
-{
-       int ret, cpu_offset = 0;
-
-       while (length > 0) {
-               int cacheline_end = ALIGN(gpu_offset + 1, 64);
-               int this_length = min(cacheline_end - gpu_offset, length);
-               int swizzled_gpu_offset = gpu_offset ^ 64;
-
-               ret = __copy_to_user(cpu_vaddr + cpu_offset,
-                                    gpu_vaddr + swizzled_gpu_offset,
-                                    this_length);
-               if (ret)
-                       return ret + length;
-
-               cpu_offset += this_length;
-               gpu_offset += this_length;
-               length -= this_length;
-       }
-
-       return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
-                         const char __user *cpu_vaddr,
-                         int length)
-{
-       int ret, cpu_offset = 0;
-
-       while (length > 0) {
-               int cacheline_end = ALIGN(gpu_offset + 1, 64);
-               int this_length = min(cacheline_end - gpu_offset, length);
-               int swizzled_gpu_offset = gpu_offset ^ 64;
-
-               ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
-                                      cpu_vaddr + cpu_offset,
-                                      this_length);
-               if (ret)
-                       return ret + length;
-
-               cpu_offset += this_length;
-               gpu_offset += this_length;
-               length -= this_length;
-       }
-
-       return 0;
-}
-
 /*
  * Pins the specified object's pages and synchronizes the object with
  * GPU accesses. Sets needs_clflush to non-zero if the caller should
@@ -1030,72 +977,23 @@ err_unpin:
        return ret;
 }
 
-static void
-shmem_clflush_swizzled_range(char *addr, unsigned long length,
-                            bool swizzled)
-{
-       if (unlikely(swizzled)) {
-               unsigned long start = (unsigned long) addr;
-               unsigned long end = (unsigned long) addr + length;
-
-               /* For swizzling simply ensure that we always flush both
-                * channels. Lame, but simple and it works. Swizzled
-                * pwrite/pread is far from a hotpath - current userspace
-                * doesn't use it at all. */
-               start = round_down(start, 128);
-               end = round_up(end, 128);
-
-               drm_clflush_virt_range((void *)start, end - start);
-       } else {
-               drm_clflush_virt_range(addr, length);
-       }
-
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int offset, int length,
-                char __user *user_data,
-                bool page_do_bit17_swizzling, bool needs_clflush)
+shmem_pread(struct page *page, int offset, int len, char __user *user_data,
+           bool needs_clflush)
 {
        char *vaddr;
        int ret;
 
        vaddr = kmap(page);
-       if (needs_clflush)
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
 
-       if (page_do_bit17_swizzling)
-               ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
-       else
-               ret = __copy_to_user(user_data, vaddr + offset, length);
-       kunmap(page);
+       if (needs_clflush)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-       return ret ? - EFAULT : 0;
-}
+       ret = __copy_to_user(user_data, vaddr + offset, len);
 
-static int
-shmem_pread(struct page *page, int offset, int length, char __user *user_data,
-           bool page_do_bit17_swizzling, bool needs_clflush)
-{
-       int ret;
-
-       ret = -ENODEV;
-       if (!page_do_bit17_swizzling) {
-               char *vaddr = kmap_atomic(page);
-
-               if (needs_clflush)
-                       drm_clflush_virt_range(vaddr + offset, length);
-               ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
-               kunmap_atomic(vaddr);
-       }
-       if (ret == 0)
-               return 0;
+       kunmap(page);
 
-       return shmem_pread_slow(page, offset, length, user_data,
-                               page_do_bit17_swizzling, needs_clflush);
+       return ret ? -EFAULT : 0;
 }
 
 static int
@@ -1104,15 +1002,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
        char __user *user_data;
        u64 remain;
-       unsigned int obj_do_bit17_swizzling;
        unsigned int needs_clflush;
        unsigned int idx, offset;
        int ret;
 
-       obj_do_bit17_swizzling = 0;
-       if (i915_gem_object_needs_bit17_swizzle(obj))
-               obj_do_bit17_swizzling = BIT(17);
-
        ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;
@@ -1130,7 +1023,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
                ret = shmem_pread(page, offset, length, user_data,
-                                 page_to_phys(page) & obj_do_bit17_swizzling,
                                  needs_clflush);
                if (ret)
                        break;
@@ -1470,33 +1362,6 @@ out_unlock:
        return ret;
 }
 
-static int
-shmem_pwrite_slow(struct page *page, int offset, int length,
-                 char __user *user_data,
-                 bool page_do_bit17_swizzling,
-                 bool needs_clflush_before,
-                 bool needs_clflush_after)
-{
-       char *vaddr;
-       int ret;
-
-       vaddr = kmap(page);
-       if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
-       if (page_do_bit17_swizzling)
-               ret = __copy_from_user_swizzled(vaddr, offset, user_data,
-                                               length);
-       else
-               ret = __copy_from_user(vaddr + offset, user_data, length);
-       if (needs_clflush_after)
-               shmem_clflush_swizzled_range(vaddr + offset, length,
-                                            page_do_bit17_swizzling);
-       kunmap(page);
-
-       return ret ? -EFAULT : 0;
-}
-
 /* Per-page copy function for the shmem pwrite fastpath.
  * Flushes invalid cachelines before writing to the target if
  * needs_clflush_before is set and flushes out any written cachelines after
@@ -1504,31 +1369,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
  */
 static int
 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
-            bool page_do_bit17_swizzling,
             bool needs_clflush_before,
             bool needs_clflush_after)
 {
+       char *vaddr;
        int ret;
 
-       ret = -ENODEV;
-       if (!page_do_bit17_swizzling) {
-               char *vaddr = kmap_atomic(page);
+       vaddr = kmap(page);
 
-               if (needs_clflush_before)
-                       drm_clflush_virt_range(vaddr + offset, len);
-               ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
-               if (needs_clflush_after)
-                       drm_clflush_virt_range(vaddr + offset, len);
+       if (needs_clflush_before)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-               kunmap_atomic(vaddr);
-       }
-       if (ret == 0)
-               return ret;
+       ret = __copy_from_user(vaddr + offset, user_data, len);
+       if (!ret && needs_clflush_after)
+               drm_clflush_virt_range(vaddr + offset, len);
 
-       return shmem_pwrite_slow(page, offset, len, user_data,
-                                page_do_bit17_swizzling,
-                                needs_clflush_before,
-                                needs_clflush_after);
+       kunmap(page);
+
+       return ret ? -EFAULT : 0;
 }
 
 static int
@@ -1538,7 +1396,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        void __user *user_data;
        u64 remain;
-       unsigned int obj_do_bit17_swizzling;
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
@@ -1553,10 +1410,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       obj_do_bit17_swizzling = 0;
-       if (i915_gem_object_needs_bit17_swizzle(obj))
-               obj_do_bit17_swizzling = BIT(17);
-
        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
@@ -1573,7 +1426,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
                ret = shmem_pwrite(page, offset, length, user_data,
-                                  page_to_phys(page) & obj_do_bit17_swizzling,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
@@ -3227,13 +3079,6 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
                           struct i915_request *request,
                           bool stalled)
 {
-       /*
-        * Make sure this write is visible before we re-enable the interrupt
-        * handlers on another CPU, as tasklet_enable() resolves to just
-        * a compiler barrier which is insufficient for our purpose here.
-        */
-       smp_store_mb(engine->irq_posted, 0);
-
        if (request)
                request = i915_gem_reset_request(engine, request, stalled);
 
@@ -3315,7 +3160,7 @@ static void nop_submit_request(struct i915_request *request)
 
        spin_lock_irqsave(&request->engine->timeline.lock, flags);
        __i915_request_submit(request);
-       intel_engine_init_global_seqno(request->engine, request->global_seqno);
+       intel_engine_write_global_seqno(request->engine, request->global_seqno);
        spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
 }
 
@@ -3356,7 +3201,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 
        /*
         * Make sure no request can slip through without getting completed by
-        * either this call here to intel_engine_init_global_seqno, or the one
+        * either this call here to intel_engine_write_global_seqno, or the one
         * in nop_submit_request.
         */
        synchronize_rcu();
@@ -3384,6 +3229,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
        if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
                return true;
 
+       if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
+               return false;
+
        GEM_TRACE("start\n");
 
        /*
@@ -3422,8 +3270,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
        i915_retire_requests(i915);
        GEM_BUG_ON(i915->gt.active_requests);
 
-       if (!intel_gpu_reset(i915, ALL_ENGINES))
-               intel_engines_sanitize(i915);
+       intel_engines_sanitize(i915, false);
 
        /*
         * Undo nop_submit_request. We prevent all new i915 requests from
@@ -5027,8 +4874,6 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
-       int err;
-
        GEM_TRACE("\n");
 
        mutex_lock(&i915->drm.struct_mutex);
@@ -5053,11 +4898,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
         * it may impact the display and we are uncertain about the stability
         * of the reset, so this could be applied to even earlier gen.
         */
-       err = -ENODEV;
-       if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
-               err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
-       if (!err)
-               intel_engines_sanitize(i915);
+       intel_engines_sanitize(i915, false);
 
        intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
        intel_runtime_pm_put(i915);
@@ -5223,15 +5064,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                                 DISP_TILE_SURFACE_SWIZZLING);
 
-       if (IS_GEN5(dev_priv))
+       if (IS_GEN(dev_priv, 5))
                return;
 
        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-       else if (IS_GEN8(dev_priv))
+       else if (IS_GEN(dev_priv, 8))
                I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
        else
                BUG();
@@ -5253,10 +5094,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
                init_unused_ring(dev_priv, SRB1_BASE);
                init_unused_ring(dev_priv, SRB2_BASE);
                init_unused_ring(dev_priv, SRB3_BASE);
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                init_unused_ring(dev_priv, SRB0_BASE);
                init_unused_ring(dev_priv, SRB1_BASE);
-       } else if (IS_GEN3(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 3)) {
                init_unused_ring(dev_priv, PRB1_BASE);
                init_unused_ring(dev_priv, PRB2_BASE);
        }
@@ -5580,7 +5421,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
        }
 
        ret = i915_gem_init_scratch(dev_priv,
-                                   IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
+                                   IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_ggtt;
index 4ec386950f75d72e1337a7f93f986b514caad875..5933adbe3d995bca9de6be942f2e72b54c9572c8 100644 (file)
@@ -86,7 +86,6 @@
  */
 
 #include <linux/log2.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       if (IS_GEN8(i915))
+       if (IS_GEN(i915, 8))
                desc |= GEN8_CTX_L3LLC_COHERENT;
 
        /* TODO: WaDisableLiteRestore when we start using semaphore
index 82e2ca17a441eed4c9f562b9d9ee9aa45f2e740a..02f7298bfe57cda5ad1d344adb0ff99079267c00 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/dma-buf.h>
 #include <linux/reservation.h>
 
-#include <drm/drmP.h>
 
 #include "i915_drv.h"
 
index 02b83a5ed96c9ec7b539bec4bdc88ed3ac1946cd..f6855401f24714b56356f0512c32be3c61a0ba5b 100644 (file)
@@ -26,7 +26,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
index 485b259127c36fdb4aeb619559e790f394ff412a..e7994505d850c961eaa1298782262ce3437eb0d3 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
                 * batchbuffers.
                 */
                if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-                   IS_GEN6(eb->i915)) {
+                   IS_GEN(eb->i915, 6)) {
                        err = i915_vma_bind(target, target->obj->cache_level,
                                            PIN_GLOBAL);
                        if (WARN_ONCE(err,
@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
        u32 *cs;
        int i;
 
-       if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
+       if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
index d548ac05ccd7a45994f38960dd121e79b4d6ecf9..d67c07cdd0b882b5f02d55d854a83d249780b4e5 100644 (file)
@@ -21,7 +21,6 @@
  * IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
         * and explicitly managed for internal users.
         */
 
-       if (IS_GEN2(fence->i915))
+       if (IS_GEN(fence->i915, 2))
                i830_write_fence_reg(fence, vma);
-       else if (IS_GEN3(fence->i915))
+       else if (IS_GEN(fence->i915, 3))
                i915_write_fence_reg(fence, vma);
        else
                i965_write_fence_reg(fence, vma);
@@ -596,13 +595,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                }
-       } else if (IS_GEN5(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 5)) {
                /* On Ironlake, whatever the DRAM config, the GPU always does
                 * the same swizzling setup.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                /* As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
@@ -647,7 +646,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
                }
 
                /* check for L-shaped memory aka modified enhanced addressing */
-               if (IS_GEN4(dev_priv) &&
+               if (IS_GEN(dev_priv, 4) &&
                    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
index add1fe7aeb930f2e21e73d12dbf96f095decc5be..a8807fbed0aa1585e5c0ac354c56f45bf47966b9 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <asm/set_memory.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
@@ -483,7 +482,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
         * attempt holding the lock is immediately reported by lockdep.
         */
        mutex_init(&vm->mutex);
-       i915_gem_shrinker_taints_mutex(&vm->mutex);
+       i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
 
        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
                        gen8_initialize_pd(vm, pd);
                        gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
                        GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
-
-                       mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
                }
 
                ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
@@ -1490,84 +1487,6 @@ unwind:
        return -ENOMEM;
 }
 
-static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_page_directory_pointer *pdp,
-                         u64 start, u64 length,
-                         gen8_pte_t scratch_pte,
-                         struct seq_file *m)
-{
-       struct i915_address_space *vm = &ppgtt->vm;
-       struct i915_page_directory *pd;
-       u32 pdpe;
-
-       gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-               struct i915_page_table *pt;
-               u64 pd_len = length;
-               u64 pd_start = start;
-               u32 pde;
-
-               if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
-                       continue;
-
-               seq_printf(m, "\tPDPE #%d\n", pdpe);
-               gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
-                       u32 pte;
-                       gen8_pte_t *pt_vaddr;
-
-                       if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
-                               continue;
-
-                       pt_vaddr = kmap_atomic_px(pt);
-                       for (pte = 0; pte < GEN8_PTES; pte += 4) {
-                               u64 va = (pdpe << GEN8_PDPE_SHIFT |
-                                         pde << GEN8_PDE_SHIFT |
-                                         pte << GEN8_PTE_SHIFT);
-                               int i;
-                               bool found = false;
-
-                               for (i = 0; i < 4; i++)
-                                       if (pt_vaddr[pte + i] != scratch_pte)
-                                               found = true;
-                               if (!found)
-                                       continue;
-
-                               seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
-                               for (i = 0; i < 4; i++) {
-                                       if (pt_vaddr[pte + i] != scratch_pte)
-                                               seq_printf(m, " %llx", pt_vaddr[pte + i]);
-                                       else
-                                               seq_puts(m, "  SCRATCH ");
-                               }
-                               seq_puts(m, "\n");
-                       }
-                       kunmap_atomic(pt_vaddr);
-               }
-       }
-}
-
-static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
-{
-       struct i915_address_space *vm = &ppgtt->vm;
-       const gen8_pte_t scratch_pte = vm->scratch_pte;
-       u64 start = 0, length = ppgtt->vm.total;
-
-       if (use_4lvl(vm)) {
-               u64 pml4e;
-               struct i915_pml4 *pml4 = &ppgtt->pml4;
-               struct i915_page_directory_pointer *pdp;
-
-               gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-                       if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
-                               continue;
-
-                       seq_printf(m, "    PML4E #%llu\n", pml4e);
-                       gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
-               }
-       } else {
-               gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
-       }
-}
-
 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 {
        struct i915_address_space *vm = &ppgtt->vm;
@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->debug_dump = gen8_dump_ppgtt;
 
        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
@@ -1688,60 +1606,6 @@ err_free:
        return ERR_PTR(err);
 }
 
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
-{
-       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-       const gen6_pte_t scratch_pte = base->vm.scratch_pte;
-       struct i915_page_table *pt;
-       u32 pte, pde;
-
-       gen6_for_all_pdes(pt, &base->pd, pde) {
-               gen6_pte_t *vaddr;
-
-               if (pt == base->vm.scratch_pt)
-                       continue;
-
-               if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
-                       u32 expected =
-                               GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
-                               GEN6_PDE_VALID;
-                       u32 pd_entry = readl(ppgtt->pd_addr + pde);
-
-                       if (pd_entry != expected)
-                               seq_printf(m,
-                                          "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
-                                          pde,
-                                          pd_entry,
-                                          expected);
-
-                       seq_printf(m, "\tPDE: %x\n", pd_entry);
-               }
-
-               vaddr = kmap_atomic_px(base->pd.page_table[pde]);
-               for (pte = 0; pte < GEN6_PTES; pte += 4) {
-                       int i;
-
-                       for (i = 0; i < 4; i++)
-                               if (vaddr[pte + i] != scratch_pte)
-                                       break;
-                       if (i == 4)
-                               continue;
-
-                       seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
-                                  pde, pte,
-                                  (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
-                       for (i = 0; i < 4; i++) {
-                               if (vaddr[pte + i] != scratch_pte)
-                                       seq_printf(m, " %08x", vaddr[pte + i]);
-                               else
-                                       seq_puts(m, "  SCRATCH");
-                       }
-                       seq_puts(m, "\n");
-               }
-               kunmap_atomic(vaddr);
-       }
-}
-
 /* Write pde (index) from the page directory @pd to the page table @pt */
 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
                                  const unsigned int pde,
@@ -2075,6 +1939,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+       int err;
 
        /*
         * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +1955,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
         * allocator works in address space sizes, so it's multiplied by page
         * size. We allocate at the top of the GTT to avoid fragmentation.
         */
-       return i915_vma_pin(ppgtt->vma,
-                           0, GEN6_PD_ALIGN,
-                           PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(ppgtt->vma,
+                          0, GEN6_PD_ALIGN,
+                          PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               goto unpin;
+
+       return 0;
+
+unpin:
+       ppgtt->pin_count = 0;
+       return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
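
The error path added to gen6_ppgtt_pin() above clears pin_count when the vma
pin fails, so a later unpin cannot underflow the count. A simplified sketch of
that shape in plain C (illustrative names; it assumes the early-return
pin_count bump that sits just before the quoted hunk):

    #include <errno.h>

    struct demo_ppgtt {
            unsigned int pin_count;
    };

    static int demo_hw_pin(struct demo_ppgtt *p)
    {
            (void)p;
            return -ENOSPC;             /* pretend the underlying pin failed */
    }

    static int demo_ppgtt_pin(struct demo_ppgtt *p)
    {
            int err;

            if (p->pin_count++)         /* already pinned: nothing to do */
                    return 0;

            err = demo_hw_pin(p);
            if (err) {
                    p->pin_count = 0;   /* roll back the optimistic bump */
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            struct demo_ppgtt p = { 0 };

            return demo_ppgtt_pin(&p) == -ENOSPC ? 0 : 1;
    }
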
@@ -2129,7 +2002,6 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
        ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
        ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->base.debug_dump = gen6_dump_ppgtt;
 
        ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
@@ -2195,9 +2067,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 {
        gtt_write_workarounds(dev_priv);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN(dev_priv, 6))
                gen6_ppgtt_enable(dev_priv);
-       else if (IS_GEN7(dev_priv))
+       else if (IS_GEN(dev_priv, 7))
                gen7_ppgtt_enable(dev_priv);
 
        return 0;
@@ -2279,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
+       return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
 }
 
 static void gen6_check_faults(struct drm_i915_private *dev_priv)
@@ -2372,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                                     DMA_ATTR_NO_WARN))
                        return 0;
 
-               /* If the DMA remap fails, one cause can be that we have
+               /*
+                * If the DMA remap fails, one cause can be that we have
                 * too many objects pinned in a small remapping table,
                 * such as swiotlb. Incrementally purge all other objects and
                 * try again - if there are no more pages to remove from
@@ -2382,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
        } while (i915_gem_shrink(to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT, NULL,
                                 I915_SHRINK_BOUND |
-                                I915_SHRINK_UNBOUND |
-                                I915_SHRINK_ACTIVE));
+                                I915_SHRINK_UNBOUND));
 
        return -ENOSPC;
 }
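
The do/while around i915_gem_shrink() above keeps retrying the DMA mapping for
as long as a shrink pass can still purge something, and only reports -ENOSPC
once a pass frees nothing at all. A self-contained sketch of that retry shape,
with user-space stand-ins rather than the driver's helpers:

    #include <errno.h>
    #include <stddef.h>

    struct demo_pool {
            size_t free_pages;          /* immediately mappable */
            size_t reclaimable;         /* can be purged on demand */
    };

    static int demo_map(struct demo_pool *p, size_t need)
    {
            if (p->free_pages < need)
                    return -ENOMEM;
            p->free_pages -= need;
            return 0;
    }

    static size_t demo_reclaim(struct demo_pool *p)
    {
            size_t chunk = p->reclaimable < 16 ? p->reclaimable : 16;

            p->reclaimable -= chunk;
            p->free_pages += chunk;
            return chunk;               /* 0 once nothing is left to purge */
    }

    static int demo_map_with_reclaim(struct demo_pool *p, size_t need)
    {
            do {
                    if (demo_map(p, need) == 0)
                            return 0;
            } while (demo_reclaim(p));

            return -ENOSPC;
    }

    int main(void)
    {
            struct demo_pool pool = { .free_pages = 4, .reclaimable = 64 };

            return demo_map_with_reclaim(&pool, 32);
    }
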
index 4874da09a3c471d24697b55b4ab7687d2d76afbf..e2360f16427ab793fd273e73b2cabfa3f5a44805 100644 (file)
@@ -413,8 +413,6 @@ struct i915_hw_ppgtt {
                struct i915_page_directory_pointer pdp; /* GEN8+ */
                struct i915_page_directory pd;          /* GEN6-7 */
        };
-
-       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
 struct gen6_hw_ppgtt {
index 0d0144b2104cb3264b2048a1569bc598fde532f3..fddde1033e747ee65f0914114975f9d8d29c88de 100644 (file)
@@ -22,7 +22,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
index a6dd7c46de0dddb4a8f2c09a21651ac1c1a8cb21..ff3da64470ddaded4a86c99570e5b9063cfac658 100644 (file)
@@ -29,7 +29,8 @@
 
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
 
 #include <drm/i915_drm.h>
 
index ea90d3a0d51143dc4a189b0e15a3ecf7c37c214c..6cc2b964c955d09a6b4636c85f5d458d2e2074ea 100644 (file)
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915,
+                         unsigned int flags,
+                         bool *unlock)
 {
-       switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
+       struct mutex *m = &i915->drm.struct_mutex;
+
+       switch (mutex_trylock_recursive(m)) {
        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;
 
        case MUTEX_TRYLOCK_FAILED:
                *unlock = false;
-               preempt_disable();
-               do {
-                       cpu_relax();
-                       if (mutex_trylock(&i915->drm.struct_mutex)) {
-                               *unlock = true;
-                               break;
-                       }
-               } while (!need_resched());
-               preempt_enable();
+               if (flags & I915_SHRINK_ACTIVE &&
+                   mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
+                       *unlock = true;
                return *unlock;
 
        case MUTEX_TRYLOCK_SUCCESS:
@@ -160,7 +157,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
        unsigned long scanned = 0;
        bool unlock;
 
-       if (!shrinker_lock(i915, &unlock))
+       if (!shrinker_lock(i915, flags, &unlock))
                return 0;
 
        /*
@@ -357,7 +354,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
        sc->nr_scanned = 0;
 
-       if (!shrinker_lock(i915, &unlock))
+       if (!shrinker_lock(i915, 0, &unlock))
                return SHRINK_STOP;
 
        freed = i915_gem_shrink(i915,
@@ -388,31 +385,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        return sc->nr_scanned ? freed : SHRINK_STOP;
 }
 
-static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
-                             int timeout_ms)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
-
-       do {
-               if (i915_gem_wait_for_idle(i915,
-                                          0, MAX_SCHEDULE_TIMEOUT) == 0 &&
-                   shrinker_lock(i915, unlock))
-                       break;
-
-               schedule_timeout_killable(1);
-               if (fatal_signal_pending(current))
-                       return false;
-
-               if (time_after(jiffies, timeout)) {
-                       pr_err("Unable to lock GPU to purge memory.\n");
-                       return false;
-               }
-       } while (1);
-
-       return true;
-}
-
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
@@ -421,7 +393,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;
 
-       freed_pages = i915_gem_shrink_all(i915);
+       intel_runtime_pm_get(i915);
+       freed_pages = i915_gem_shrink(i915, -1UL, NULL,
+                                     I915_SHRINK_BOUND |
+                                     I915_SHRINK_UNBOUND);
+       intel_runtime_pm_put(i915);
 
        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
@@ -447,10 +423,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned.\n",
                        freed_pages, unevictable);
-       if (unbound || bound)
-               pr_err("%lu and %lu pages still available in the "
-                      "bound and unbound GPU page lists.\n",
-                      bound, unbound);
 
        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
@@ -464,23 +436,20 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        bool unlock;
-       int ret;
 
-       if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
+       if (!shrinker_lock(i915, 0, &unlock))
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (ret)
+       if (i915_gem_wait_for_idle(i915,
+                                  I915_WAIT_LOCKED,
+                                  MAX_SCHEDULE_TIMEOUT))
                goto out;
 
        intel_runtime_pm_get(i915);
        freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                       I915_SHRINK_BOUND |
                                       I915_SHRINK_UNBOUND |
-                                      I915_SHRINK_ACTIVE |
                                       I915_SHRINK_VMAPS);
        intel_runtime_pm_put(i915);
 
@@ -533,13 +502,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
        unregister_shrinker(&i915->mm.shrinker);
 }
 
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+                                   struct mutex *mutex)
 {
+       bool unlock = false;
+
        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;
 
+       if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
+               mutex_acquire(&i915->drm.struct_mutex.dep_map,
+                             I915_MM_NORMAL, 0, _RET_IP_);
+               unlock = true;
+       }
+
        fs_reclaim_acquire(GFP_KERNEL);
-       mutex_lock(mutex);
-       mutex_unlock(mutex);
+
+       /*
+        * As we invariably rely on the struct_mutex within the shrinker,
+        * but have a complicated recursion dance, taint all the mutexes used
+        * within the shrinker with the struct_mutex. For completeness, we
+        * taint with all subclass of struct_mutex, even though we should
+        * only need tainting by I915_MM_NORMAL to catch possible ABBA
+        * deadlocks from using struct_mutex inside @mutex.
+        */
+       mutex_acquire(&i915->drm.struct_mutex.dep_map,
+                     I915_MM_SHRINKER, 0, _RET_IP_);
+
+       mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+       mutex_release(&mutex->dep_map, 0, _RET_IP_);
+
+       mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+
        fs_reclaim_release(GFP_KERNEL);
+
+       if (unlock)
+               mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
 }
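
shrinker_lock() above has to handle being entered while the current task may
already hold struct_mutex, and it may only block for the lock when the caller
passes I915_SHRINK_ACTIVE. A rough pthread analogue of that decision follows;
it is illustrative only (the kernel uses mutex_trylock_recursive() and
mutex_lock_killable_nested(), and the lockdep priming done in
i915_gem_shrinker_taints_mutex() has no user-space equivalent), and the owner
bookkeeping below is deliberately simplified:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t demo_owner;
    static bool demo_owned;     /* simplified: not safe against racing readers */

    static bool demo_shrinker_lock(bool may_block, bool *unlock)
    {
            if (demo_owned && pthread_equal(demo_owner, pthread_self())) {
                    *unlock = false;    /* recursive entry: lock already ours */
                    return true;
            }

            if (pthread_mutex_trylock(&demo_lock) == 0 ||
                (may_block && pthread_mutex_lock(&demo_lock) == 0)) {
                    demo_owner = pthread_self();
                    demo_owned = true;
                    *unlock = true;     /* we took it, so we must release it */
                    return true;
            }

            *unlock = false;
            return false;               /* contended and not allowed to wait */
    }

    static void demo_shrinker_unlock(bool unlock)
    {
            if (!unlock)
                    return;
            demo_owned = false;
            pthread_mutex_unlock(&demo_lock);
    }

    int main(void)
    {
            bool unlock;

            if (demo_shrinker_lock(false, &unlock))
                    demo_shrinker_unlock(unlock);
            return 0;
    }
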
index f29a7ff7c362c7d21e60c1d822465cfe6a3d2090..9df615eea2d8798c97cef5920023d4b99e0e7691 100644 (file)
@@ -26,7 +26,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
                resource_size_t ggtt_start;
 
                ggtt_start = I915_READ(PGTBL_CTL);
-               if (IS_GEN4(dev_priv))
+               if (IS_GEN(dev_priv, 4))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
-               if (r == NULL && !IS_GEN3(dev_priv)) {
+               if (r == NULL && !IS_GEN(dev_priv, 3)) {
                        DRM_ERROR("conflict detected with stolen region: %pR\n",
                                  dsm);
 
@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
         * Whether ILK really reuses the ELK register for this is unclear.
         * Let's see if we catch anyone with this supposedly enabled on ILK.
         */
-       WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
+       WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+            reg_val);
 
        if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
                return;
index d9dc9df523b58e9a8645838e9c2ffa2080c54d61..16cc9ddbce34ae8e97e59838c2e687af6a0f52f9 100644 (file)
@@ -27,7 +27,6 @@
 
 #include <linux/string.h>
 #include <linux/bitops.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
        }
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (IS_GEN3(i915))
+       if (IS_GEN(i915, 3))
                ggtt_size = 1024*1024;
        else
                ggtt_size = 512*1024;
@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
                        return false;
        }
 
-       if (IS_GEN2(i915) ||
+       if (IS_GEN(i915, 2) ||
            (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
                tile_width = 128;
        else
index 9558582c105ec4953ba0616b23966000a095cfab..1fb6a7bb505445b1e1e61d47d21cd4a58cc2cf8f 100644 (file)
@@ -22,7 +22,6 @@
  *
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
index 07465123c1663c61818fc1b63c1c04cab782ba06..5eaf586c4d48cfe173990aad6b68362824c17afc 100644 (file)
@@ -594,13 +594,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
 
 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
                                   const struct intel_device_info *info,
+                                  const struct intel_runtime_info *runtime,
                                   const struct intel_driver_caps *caps)
 {
        struct drm_printer p = i915_error_printer(m);
 
        intel_device_info_dump_flags(info, &p);
        intel_driver_caps_print(caps, &p);
-       intel_device_info_dump_topology(&info->sseu, &p);
+       intel_device_info_dump_topology(&runtime->sseu, &p);
 }
 
 static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -664,7 +665,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 
        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
-       err_printf(m, "Kernel: %s\n", init_utsname()->release);
+       err_printf(m, "Kernel: %s %s\n",
+                  init_utsname()->release,
+                  init_utsname()->machine);
        ts = ktime_to_timespec64(error->time);
        err_printf(m, "Time: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -735,7 +738,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
 
-       if (IS_GEN7(m->i915))
+       if (IS_GEN(m->i915, 7))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -844,7 +847,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
        if (error->display)
                intel_display_print_error_state(m, error->display);
 
-       err_print_capabilities(m, &error->device_info, &error->driver_caps);
+       err_print_capabilities(m, &error->device_info, &error->runtime_info,
+                              &error->driver_caps);
        err_print_params(m, &error->params);
        err_print_uc(m, &error->uc);
 }
@@ -963,17 +967,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
        kfree(obj);
 }
 
-static __always_inline void free_param(const char *type, void *x)
-{
-       if (!__builtin_strcmp(type, "char *"))
-               kfree(*(void **)x);
-}
 
 static void cleanup_params(struct i915_gpu_state *error)
 {
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
-       I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+       i915_params_free(&error->params);
 }
 
 static void cleanup_uc_state(struct i915_gpu_state *error)
@@ -1037,7 +1034,7 @@ i915_error_object_create(struct drm_i915_private *i915,
        dma_addr_t dma;
        int ret;
 
-       if (!vma)
+       if (!vma || !vma->pages)
                return NULL;
 
        num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -1314,7 +1311,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
        if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
                i915_reg_t mmio;
 
-               if (IS_GEN7(dev_priv)) {
+               if (IS_GEN(dev_priv, 7)) {
                        switch (engine->id) {
                        default:
                        case RCS:
@@ -1330,7 +1327,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(engine->i915)) {
+               } else if (IS_GEN(engine->i915, 6)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
@@ -1352,10 +1349,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
                ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-               if (IS_GEN6(dev_priv))
+               if (IS_GEN(dev_priv, 6))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
-               else if (IS_GEN7(dev_priv))
+               else if (IS_GEN(dev_priv, 7))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
                else if (INTEL_GEN(dev_priv) >= 8)
@@ -1725,7 +1722,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
        }
 
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                error->err_int = I915_READ(GEN7_ERR_INT);
 
        if (INTEL_GEN(dev_priv) >= 8) {
@@ -1733,7 +1730,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
        }
 
-       if (IS_GEN6(dev_priv)) {
+       if (IS_GEN(dev_priv, 6)) {
                error->forcewake = I915_READ_FW(FORCEWAKE);
                error->gab_ctl = I915_READ(GAB_CTL);
                error->gfx_mode = I915_READ(GFX_MODE);
@@ -1753,7 +1750,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->ccid = I915_READ(CCID);
 
        /* 3: Feature specific registers */
-       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+       if (IS_GEN_RANGE(dev_priv, 6, 7)) {
                error->gam_ecochk = I915_READ(GAM_ECOCHK);
                error->gac_eco = I915_READ(GAC_ECO_BITS);
        }
@@ -1777,7 +1774,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
                error->ier = I915_READ(DEIER);
                error->gtier[0] = I915_READ(GTIER);
                error->ngtier = 1;
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                error->ier = I915_READ16(IER);
        } else if (!IS_VALLEYVIEW(dev_priv)) {
                error->ier = I915_READ(IER);
@@ -1831,21 +1828,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
        memcpy(&error->device_info,
               INTEL_INFO(i915),
               sizeof(error->device_info));
+       memcpy(&error->runtime_info,
+              RUNTIME_INFO(i915),
+              sizeof(error->runtime_info));
        error->driver_caps = i915->caps;
 }
 
-static __always_inline void dup_param(const char *type, void *x)
-{
-       if (!__builtin_strcmp(type, "char *"))
-               *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
-}
-
 static void capture_params(struct i915_gpu_state *error)
 {
-       error->params = i915_modparams;
-#define DUP(T, x, ...) dup_param(#T, &error->params.x);
-       I915_PARAMS_FOR_EACH(DUP);
-#undef DUP
+       i915_params_copy(&error->params, &i915_modparams);
 }
 
 static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
@@ -1907,9 +1898,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 {
        struct i915_gpu_state *error;
 
+       /* Check if GPU capture has been disabled */
+       error = READ_ONCE(i915->gpu_error.first_error);
+       if (IS_ERR(error))
+               return error;
+
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
-       if (!error)
-               return NULL;
+       if (!error) {
+               i915_disable_error_state(i915, -ENOMEM);
+               return ERR_PTR(-ENOMEM);
+       }
 
        kref_init(&error->ref);
        error->i915 = i915;
@@ -1945,11 +1943,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
                return;
 
        error = i915_capture_gpu_state(i915);
-       if (!error) {
-               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-               i915_disable_error_state(i915, -ENOMEM);
+       if (IS_ERR(error))
                return;
-       }
 
        i915_error_capture_msg(i915, error, engine_mask, error_msg);
        DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1982,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
        spin_lock_irq(&i915->gpu_error.lock);
        error = i915->gpu_error.first_error;
-       if (error)
+       if (!IS_ERR_OR_NULL(error))
                i915_gpu_state_get(error);
        spin_unlock_irq(&i915->gpu_error.lock);
 
@@ -2000,10 +1995,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 
        spin_lock_irq(&i915->gpu_error.lock);
        error = i915->gpu_error.first_error;
-       i915->gpu_error.first_error = NULL;
+       if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+               i915->gpu_error.first_error = NULL;
        spin_unlock_irq(&i915->gpu_error.lock);
 
-       if (!IS_ERR(error))
+       if (!IS_ERR_OR_NULL(error))
                i915_gpu_state_put(error);
 }
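
i915_capture_gpu_state() now reuses gpu_error.first_error as a tri-state
value: NULL (nothing captured yet), a real capture, or an encoded errno once
capture has been disabled, which is why the readers above switch to IS_ERR()
and IS_ERR_OR_NULL(). A compact user-space sketch of that sentinel pattern;
the ERR_PTR helpers below are simplified local stand-ins for the kernel
macros:

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_ERRNO       4095

    static inline void *ERR_PTR(long err)     { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct demo_state { int dummy; };

    /* NULL = nothing captured, valid pointer = a capture, ERR_PTR = disabled */
    static struct demo_state *first_error;

    static struct demo_state *demo_capture(void)
    {
            struct demo_state *error;

            error = first_error;
            if (IS_ERR(error))          /* capture permanently disabled */
                    return error;

            error = calloc(1, sizeof(*error));
            if (!error) {
                    first_error = ERR_PTR(-ENOMEM); /* disable further capture */
                    return ERR_PTR(-ENOMEM);
            }
            return error;
    }

    int main(void)
    {
            struct demo_state *s = demo_capture();

            if (IS_ERR(s))
                    return (int)-PTR_ERR(s);
            free(s);
            return 0;
    }
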
 
index ff2652bbb0b08bb22cab8d32f338c1bdf8643e81..6d9f45468ac180728e7e7ce68f71847d117cd6a8 100644 (file)
@@ -45,6 +45,7 @@ struct i915_gpu_state {
        u32 reset_count;
        u32 suspend_count;
        struct intel_device_info device_info;
+       struct intel_runtime_info runtime_info;
        struct intel_driver_caps driver_caps;
        struct i915_params params;
 
index e869daf9c8a9e0c21f409506ab60240f76bd95c7..73c3e8f519e8fbfa463a4c830c28a86638eb8710 100644 (file)
@@ -28,7 +28,6 @@
  */
 #include <linux/compat.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
index d447d7d508f483c62baecad23035a60702fd6a3c..288b0662f7b7b3f63a0f6f6d582d7e5d43c4b36f 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/sysrq.h>
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -950,7 +949,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -1030,7 +1029,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        if (stime)
                *stime = ktime_get();
 
-       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+       if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
@@ -1090,7 +1089,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        else
                position += vtotal - vbl_end;
 
-       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+       if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
@@ -1189,13 +1188,6 @@ static void notify_ring(struct intel_engine_cs *engine)
                                rq = i915_request_get(waiter);
 
                        tsk = wait->tsk;
-               } else {
-                       if (engine->irq_seqno_barrier &&
-                           i915_seqno_passed(seqno, wait->seqno - 1)) {
-                               set_bit(ENGINE_IRQ_BREADCRUMB,
-                                       &engine->irq_posted);
-                               tsk = wait->tsk;
-                       }
                }
 
                engine->breadcrumbs.irq_count++;
@@ -2547,7 +2539,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+       if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
                ironlake_rps_change_irq_handler(dev_priv);
 }
 
@@ -3243,7 +3235,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
        u32 eir;
 
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
 
        if (INTEL_GEN(dev_priv) < 4)
@@ -3586,11 +3578,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       if (IS_GEN5(dev_priv))
-               I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET(DE);
-       if (IS_GEN7(dev_priv))
+       if (IS_GEN(dev_priv, 7))
                I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
        if (IS_HASWELL(dev_priv)) {
@@ -4045,7 +4034,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        }
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
-       if (IS_GEN5(dev_priv)) {
+       if (IS_GEN(dev_priv, 5)) {
                gt_irqs |= ILK_BSD_USER_INTERRUPT;
        } else {
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -4183,9 +4172,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
                };
 
-       if (HAS_L3_DPF(dev_priv))
-               gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
        dev_priv->pm_ier = 0x0;
        dev_priv->pm_imr = ~dev_priv->pm_ier;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -4368,8 +4354,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE16(HWSTAM, 0xffff);
-
        GEN2_IRQ_RESET();
 }
 
@@ -4537,8 +4521,6 @@ static void i915_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET();
 }
 
@@ -4648,8 +4630,6 @@ static void i965_irq_reset(struct drm_device *dev)
 
        i9xx_pipestat_irq_reset(dev_priv);
 
-       I915_WRITE(HWSTAM, 0xffffffff);
-
        GEN3_IRQ_RESET();
 }
 
@@ -4836,7 +4816,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) >= 8)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
-       if (IS_GEN2(dev_priv)) {
+       if (IS_GEN(dev_priv, 2)) {
                /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
        } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -4852,7 +4832,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         * vblank interrupts to produce sane vblank sequence numbers.
         */
-       if (!IS_GEN2(dev_priv))
+       if (!IS_GEN(dev_priv, 2))
                dev->vblank_disable_immediate = true;
 
        /* Most platforms treat the display irq block as an always-on
@@ -4924,14 +4904,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
-               if (IS_GEN2(dev_priv)) {
+               if (IS_GEN(dev_priv, 2)) {
                        dev->driver->irq_preinstall = i8xx_irq_reset;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_reset;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
-               } else if (IS_GEN3(dev_priv)) {
+               } else if (IS_GEN(dev_priv, 3)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
index 2e0356561839d15016d72394d7960388545fb223..9f0539bdaa398dbfb4050df67f952eb0207e1c57 100644 (file)
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
        "triaging and debugging hangs.");
 #endif
 
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
        "Periodically check GPU activity for detecting hangs. "
        "WARNING: Disabling this can cause system wide hangs. "
        "(default: true)");
@@ -203,3 +203,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
        I915_PARAMS_FOR_EACH(PRINT);
 #undef PRINT
 }
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+       if (!__builtin_strcmp(type, "char *"))
+               *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+       *dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+       I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+       if (!__builtin_strcmp(type, "char *")) {
+               kfree(*(void **)x);
+               *(void **)x = NULL;
+       }
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+       I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
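
i915_params_copy() and i915_params_free() lean on the I915_PARAMS_FOR_EACH()
X-macro plus a per-member hook that only deep-copies or frees the members
whose stringified type is "char *". A self-contained user-space version of the
same trick, with made-up parameter names:

    #include <stdlib.h>
    #include <string.h>

    #define DEMO_PARAMS_FOR_EACH(param) \
            param(char *, fw_path, NULL) \
            param(int, modeset, -1) \
            param(unsigned int, timeout_ms, 640)

    struct demo_params {
    #define MEMBER(T, x, ...) T x;
            DEMO_PARAMS_FOR_EACH(MEMBER)
    #undef MEMBER
    };

    static void dup_param(const char *type, void *x)
    {
            if (!strcmp(type, "char *"))
                    *(char **)x = *(char **)x ? strdup(*(char **)x) : NULL;
    }

    static void free_param(const char *type, void *x)
    {
            if (!strcmp(type, "char *")) {
                    free(*(char **)x);
                    *(char **)x = NULL;
            }
    }

    static void demo_params_copy(struct demo_params *dst,
                                 const struct demo_params *src)
    {
            *dst = *src;                        /* shallow copy first */
    #define DUP(T, x, ...) dup_param(#T, &dst->x);
            DEMO_PARAMS_FOR_EACH(DUP)
    #undef DUP
    }

    static void demo_params_free(struct demo_params *p)
    {
    #define FREE(T, x, ...) free_param(#T, &p->x);
            DEMO_PARAMS_FOR_EACH(FREE)
    #undef FREE
    }

    int main(void)
    {
            struct demo_params a = { .fw_path = strdup("demo.bin"), .modeset = 1 };
            struct demo_params b;

            demo_params_copy(&b, &a);
            demo_params_free(&a);
            demo_params_free(&b);
            return 0;
    }
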
index 7e56c516c815c269230c697da6166a7fae455fd8..6efcf330bdab83fa41aefac760e0965d5531454d 100644 (file)
@@ -33,6 +33,15 @@ struct drm_printer;
 #define ENABLE_GUC_SUBMISSION          BIT(0)
 #define ENABLE_GUC_LOAD_HUC            BIT(1)
 
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ * param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
 #define I915_PARAMS_FOR_EACH(param) \
        param(char *, vbt_firmware, NULL) \
        param(int, modeset, -1) \
@@ -78,6 +87,8 @@ struct i915_params {
 extern struct i915_params i915_modparams __read_mostly;
 
 void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
 
 #endif
 
index 6350db5503cda372cedcda638680dfcec9725a7e..dd4aff2b256e910408ba5d7a5d35ca4fa34a9d5e 100644 (file)
@@ -82,6 +82,7 @@
        .display.has_overlay = 1, \
        .display.overlay_needs_physical = 1, \
        .display.has_gmch_display = 1, \
+       .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
@@ -122,6 +123,7 @@ static const struct intel_device_info intel_i865g_info = {
        GEN(3), \
        .num_pipes = 2, \
        .display.has_gmch_display = 1, \
+       .gpu_reset_clobbers_display = true, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -198,6 +200,7 @@ static const struct intel_device_info intel_pineview_info = {
        .num_pipes = 2, \
        .display.has_hotplug = 1, \
        .display.has_gmch_display = 1, \
+       .gpu_reset_clobbers_display = true, \
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -228,6 +231,7 @@ static const struct intel_device_info intel_g45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_G45),
        .ring_mask = RENDER_RING | BSD_RING,
+       .gpu_reset_clobbers_display = false,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -237,6 +241,7 @@ static const struct intel_device_info intel_gm45_info = {
        .display.has_fbc = 1,
        .display.supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
+       .gpu_reset_clobbers_display = false,
 };
 
 #define GEN5_FEATURES \
@@ -532,7 +537,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
        .display.has_fbc = 1, \
        .display.has_psr = 1, \
        .has_runtime_pm = 1, \
-       .has_pooled_eu = 0, \
        .display.has_csr = 1, \
        .has_rc6 = 1, \
        .display.has_dp_mst = 1, \
index 2b2eb57ca71f2905020aed9f7797684f69c148e7..5b1ae5ed97b35671080c0b57a3549f4f233b4b2a 100644 (file)
@@ -1796,7 +1796,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
         * be read back from automatically triggered reports, as part of the
         * RPT_ID field.
         */
-       if (IS_GEN(dev_priv, 9, 11)) {
+       if (IS_GEN_RANGE(dev_priv, 9, 11)) {
                I915_WRITE(GEN8_OA_DEBUG,
                           _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
                                              GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2646,7 +2646,7 @@ err:
 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
 {
        return div64_u64(1000000000ULL * (2ULL << exponent),
-                        1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+                        1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 }
 
 /**
@@ -3415,7 +3415,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                dev_priv->perf.oa.ops.read = gen8_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
 
-               if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+               if (IS_GEN_RANGE(dev_priv, 8, 9)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3431,7 +3431,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                        dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                        dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
 
-                       if (IS_GEN8(dev_priv)) {
+                       if (IS_GEN(dev_priv, 8)) {
                                dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
                                dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
 
@@ -3442,7 +3442,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 
                                dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
                        }
-               } else if (IS_GEN(dev_priv, 10, 11)) {
+               } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3471,7 +3471,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
                oa_sample_rate_hard_limit = 1000 *
-                       (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+                       (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
                dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
 
                mutex_init(&dev_priv->perf.metrics_lock);
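
oa_exponent_to_ns() above turns an OA timer exponent into a sampling period:
as the formula reads, the period is (2 << exponent) timestamp ticks at a
frequency given in kHz. A worked example in plain C (12000 kHz is only an
illustrative value; the real frequency comes from
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t oa_exponent_to_ns(uint64_t freq_khz, int exponent)
    {
            return 1000000000ULL * (2ULL << exponent) / (1000ULL * freq_khz);
    }

    int main(void)
    {
            /* at 12000 kHz: 166 ns, 333 ns, 666 ns, 1333 ns */
            for (int exp = 0; exp <= 3; exp++)
                    printf("exponent %d -> %llu ns\n", exp,
                           (unsigned long long)oa_exponent_to_ns(12000, exp));
            return 0;
    }
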
index fe56465cdfd67512aca73d3cdf0bc3b7e7f6bf2f..cbcb957b7141d768541853129cdc029d5f804029 100644 (file)
@@ -13,7 +13,7 @@
 static int query_topology_info(struct drm_i915_private *dev_priv,
                               struct drm_i915_query_item *query_item)
 {
-       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        struct drm_i915_query_topology_info topo;
        u32 slice_length, subslice_length, eu_length, total_length;
 
index 0a7d60509ca7527f018a12208316bff91c7c6be8..44958d994bfa9a72c9437e0ded97a3994bf47ebf 100644 (file)
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
 }
 
+#define VLV_DISPLAY_BASE               0x180000
+#define VLV_MIPI_BASE                  VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE                  0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv)    (INTEL_INFO(dev_priv)->display_mmio_offset)
+
 /*
  * Given the first two numbers __a and __b of arbitrarily many evenly spaced
  * numbers, pick the 0-based __index'th value.
@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Device info offset array based helpers for groups of registers with unevenly
  * spaced base offsets.
  */
-#define _MMIO_PIPE2(pipe, reg)         _MMIO(dev_priv->info.pipe_offsets[pipe] - \
-                                             dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg)                _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-                                             dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg)            _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-                                             dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-                                             dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg)         _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+                                             INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg)                _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+                                             INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg)            _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+                                             INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+                                             DISPLAY_MMIO_BASE(dev_priv))
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({                                     \
@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_MEDIA4            (1 << 8)
 #define  GEN11_GRDOM_VECS              (1 << 13)
 #define  GEN11_GRDOM_VECS2             (1 << 14)
+#define  GEN11_GRDOM_SFC0              (1 << 17)
+#define  GEN11_GRDOM_SFC1              (1 << 18)
+
+#define  GEN11_VCS_SFC_RESET_BIT(instance)     (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define  GEN11_VECS_SFC_RESET_BIT(instance)    (GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine)      _MMIO((engine)->mmio_base + 0x88C)
+#define   GEN11_VCS_SFC_FORCED_LOCK_BIT                (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine)      _MMIO((engine)->mmio_base + 0x890)
+#define   GEN11_VCS_SFC_USAGE_BIT              (1 << 0)
+#define   GEN11_VCS_SFC_LOCK_ACK_BIT           (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine)     _MMIO((engine)->mmio_base + 0x201C)
+#define   GEN11_VECS_SFC_FORCED_LOCK_BIT       (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine)                _MMIO((engine)->mmio_base + 0x2018)
+#define   GEN11_VECS_SFC_LOCK_ACK_BIT          (1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine)           _MMIO((engine)->mmio_base + 0x2014)
+#define   GEN11_VECS_SFC_USAGE_BIT             (1 << 0)
 
 #define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base + 0x228)
 #define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base + 0x518)
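
The new GEN11_VCS_SFC_RESET_BIT() and GEN11_VECS_SFC_RESET_BIT() macros encode
which SFC reset bit an engine instance maps to: the VCS instance is halved
first, so a pair of VCS engines selects the same SFC bit, while each VECS
instance selects one directly. A short worked example of that arithmetic,
using the values from the #defines above:

    #include <stdio.h>

    #define GRDOM_SFC0      (1u << 17)      /* GEN11_GRDOM_SFC0 */

    int main(void)
    {
            for (unsigned int vcs = 0; vcs < 4; vcs++)
                    printf("VCS%u  -> 0x%08x\n", vcs, GRDOM_SFC0 << (vcs >> 1));
            for (unsigned int vecs = 0; vecs < 2; vecs++)
                    printf("VECS%u -> 0x%08x\n", vecs, GRDOM_SFC0 << vecs);
            return 0;
    }
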
@@ -1866,6 +1890,10 @@ enum i915_power_well_id {
 
 #define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
 #define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define ICL_PORT_TX_DW7_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)   _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)                  ((x) << 24)
 #define   N_SCALAR_MASK                        (0x7F << 24)
 
@@ -2592,10 +2620,6 @@ enum i915_power_well_id {
 
 #define   GEN11_GFX_DISABLE_LEGACY_MODE        (1 << 3)
 
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
 #define VLV_GU_CTL0    _MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1    _MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0          _MMIO(0x209c) /* 915+ only */
@@ -3152,9 +3176,9 @@ enum i915_power_well_id {
 /*
  * Clock control & power management
  */
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
 #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0   _MMIO(0x6000)
@@ -3251,9 +3275,9 @@ enum i915_power_well_id {
 #define   SDVO_MULTIPLIER_SHIFT_HIRES          4
 #define   SDVO_MULTIPLIER_SHIFT_VGA            0
 
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
 #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
@@ -3325,7 +3349,7 @@ enum i915_power_well_id {
 #define  DSTATE_PLL_D3_OFF                     (1 << 3)
 #define  DSTATE_GFX_CLOCK_GATING               (1 << 1)
 #define  DSTATE_DOT_CLOCK_GATING               (1 << 0)
-#define DSPCLK_GATE_D  _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D  _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
 # define VRHUNIT_CLOCK_GATE_DISABLE            (1 << 28) /* 965 */
@@ -3465,7 +3489,7 @@ enum i915_power_well_id {
 #define _PALETTE_A             0xa000
 #define _PALETTE_B             0xa800
 #define _CHV_PALETTE_C         0xc000
-#define PALETTE(pipe, i)       _MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i)       _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
                                      _PICK((pipe), _PALETTE_A,         \
                                            _PALETTE_B, _CHV_PALETTE_C) + \
                                      (i) * 4)
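
Registers in this file now go through DISPLAY_MMIO_BASE(dev_priv) instead of
reaching into dev_priv->info directly; PALETTE() just above is a typical case,
adding a per-pipe base picked from a small table plus 4 bytes per entry on top
of the display MMIO base. A sketch of that arithmetic in plain C, using the
palette offsets visible above (0x180000 is VLV_DISPLAY_BASE; other platforms
typically use a display base of 0):

    #include <stdio.h>

    enum { PIPE_A, PIPE_B, PIPE_C };

    static const unsigned int palette_base[] = { 0xa000, 0xa800, 0xc000 };

    static unsigned int demo_palette_reg(unsigned int display_base,
                                         int pipe, int entry)
    {
            return display_base + palette_base[pipe] + entry * 4;
    }

    int main(void)
    {
            /* pipe B palette entries 0 and 255 on a VLV-style display base */
            printf("%#x\n", demo_palette_reg(0x180000, PIPE_B, 0));
            printf("%#x\n", demo_palette_reg(0x180000, PIPE_B, 255));
            return 0;
    }
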
@@ -4298,7 +4322,7 @@ enum {
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN                _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN                _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
 #define   PORTB_HOTPLUG_INT_EN                 (1 << 29)
 #define   PORTC_HOTPLUG_INT_EN                 (1 << 28)
 #define   PORTD_HOTPLUG_INT_EN                 (1 << 27)
@@ -4328,7 +4352,7 @@ enum {
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
 
-#define PORT_HOTPLUG_STAT      _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT      _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
 /*
  * HDMI/DP bits are g4x+
  *
@@ -4410,7 +4434,7 @@ enum {
 
 #define PORT_DFT_I9XX                          _MMIO(0x61150)
 #define   DC_BALANCE_RESET                     (1 << 25)
-#define PORT_DFT2_G4X          _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X          _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
 #define   DC_BALANCE_RESET_VLV                 (1 << 31)
 #define   PIPE_SCRAMBLE_RESET_MASK             ((1 << 14) | (0x3 << 0))
 #define   PIPE_C_SCRAMBLE_RESET                        (1 << 14) /* chv */
@@ -4695,7 +4719,7 @@ enum {
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
 /* Panel fitting */
-#define PFIT_CONTROL   _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
 #define   PFIT_ENABLE          (1 << 31)
 #define   PFIT_PIPE_MASK       (3 << 29)
 #define   PFIT_PIPE_SHIFT      29
@@ -4713,7 +4737,7 @@ enum {
 #define   PFIT_SCALING_PROGRAMMED (1 << 26)
 #define   PFIT_SCALING_PILLAR  (2 << 26)
 #define   PFIT_SCALING_LETTER  (3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
 /* Pre-965 */
 #define                PFIT_VERT_SCALE_SHIFT           20
 #define                PFIT_VERT_SCALE_MASK            0xfff00000
@@ -4725,25 +4749,25 @@ enum {
 #define                PFIT_HORIZ_SCALE_SHIFT_965      0
 #define                PFIT_HORIZ_SCALE_MASK_965       0x00001fff
 
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
 
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
 #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
                                         _VLV_BLC_PWM_CTL2_B)
 
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
 #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
                                        _VLV_BLC_PWM_CTL_B)
 
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
 #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
                                         _VLV_BLC_HIST_CTL_B)
 
 /* Backlight control */
-#define BLC_PWM_CTL2   _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE               (1 << 31)
 #define   BLM_COMBINATION_MODE         (1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT              (1 << 29)
@@ -4766,7 +4790,7 @@ enum {
 #define   BLM_PHASE_IN_COUNT_MASK      (0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT      (0)
 #define   BLM_PHASE_IN_INCR_MASK       (0xff << 0)
-#define BLC_PWM_CTL    _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL    _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
@@ -4788,7 +4812,7 @@ enum {
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV                (0xfffe)
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL   _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 #define  BLM_HISTOGRAM_ENABLE                  (1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -5412,47 +5436,47 @@ enum {
  * is 20 bytes in each direction, hence the 5 fixed
  * data registers
  */
-#define _DPA_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64024)
-
-#define _DPB_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64124)
-
-#define _DPC_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64224)
-
-#define _DPD_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64324)
-
-#define _DPE_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64424)
-
-#define _DPF_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPA_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
+
+#define _DPB_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
+
+#define _DPC_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
+
+#define _DPD_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
+
+#define _DPE_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
+
+#define _DPF_AUX_CH_CTL                (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1      (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2      (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3      (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4      (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5      (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
 
 #define DP_AUX_CH_CTL(aux_ch)  _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
 #define DP_AUX_CH_DATA(aux_ch, i)      _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5728,7 +5752,7 @@ enum {
 #define   DPINVGTT_STATUS_MASK                 0xff
 #define   DPINVGTT_STATUS_MASK_CHV             0xfff
 
-#define DSPARB                 _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB                 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
 #define   DSPARB_CSTART_MASK   (0x7f << 7)
 #define   DSPARB_CSTART_SHIFT  7
 #define   DSPARB_BSTART_MASK   (0x7f)
@@ -5763,7 +5787,7 @@ enum {
 #define   DSPARB_SPRITEF_MASK_VLV      (0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1         _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
 #define   DSPFW_SR_SHIFT               23
 #define   DSPFW_SR_MASK                        (0x1ff << 23)
 #define   DSPFW_CURSORB_SHIFT          16
@@ -5774,7 +5798,7 @@ enum {
 #define   DSPFW_PLANEA_SHIFT           0
 #define   DSPFW_PLANEA_MASK            (0x7f << 0)
 #define   DSPFW_PLANEA_MASK_VLV                (0xff << 0) /* vlv/chv */
-#define DSPFW2         _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
 #define   DSPFW_FBC_SR_EN              (1 << 31)         /* g4x */
 #define   DSPFW_FBC_SR_SHIFT           28
 #define   DSPFW_FBC_SR_MASK            (0x7 << 28) /* g4x */
@@ -5790,7 +5814,7 @@ enum {
 #define   DSPFW_SPRITEA_SHIFT          0
 #define   DSPFW_SPRITEA_MASK           (0x7f << 0) /* g4x */
 #define   DSPFW_SPRITEA_MASK_VLV       (0xff << 0) /* vlv/chv */
-#define DSPFW3         _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3         _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
 #define   DSPFW_HPLL_SR_EN             (1 << 31)
 #define   PINEVIEW_SELF_REFRESH_EN     (1 << 30)
 #define   DSPFW_CURSOR_SR_SHIFT                24
@@ -6206,35 +6230,35 @@ enum {
  * [10:1f] all
  * [30:32] all
  */
-#define SWF0(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i)        _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i)        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
 #define SWF_ILK(i)     _MMIO(0x4F000 + (i) * 4)
 
 /* Pipe B */
-#define _PIPEBDSL              (dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF             (dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT             (dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL              (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF             (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT             (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
 #define _PIPEBFRAMEHIGH                0x71040
 #define _PIPEBFRAMEPIXEL       0x71044
-#define _PIPEB_FRMCOUNT_G4X    (dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X   (dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X    (DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X   (DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
 
 
 /* Display B control */
-#define _DSPBCNTR              (dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR              (DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
 #define   DISPPLANE_ALPHA_TRANS_ENABLE         (1 << 15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE                0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY       0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY       (1)
-#define _DSPBADDR              (dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE            (dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS               (dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE              (dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF              (dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF           (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET            (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE          (dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR              (DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE            (DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS               (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE              (DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF              (DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF           (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET            (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE          (DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
 
 /* ICL DSI 0 and 1 */
 #define _PIPEDSI0CONF          0x7b008
@@ -8786,7 +8810,7 @@ enum {
 #define   GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
 
 /* Audio */
-#define G4X_AUD_VID_DID                        _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID                        _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
 #define   INTEL_AUDIO_DEVCL            0x808629FB
 #define   INTEL_AUDIO_DEVBLC           0x80862801
 #define   INTEL_AUDIO_DEVCTG           0x80862802
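Note on the i915_reg.h hunks above: every register that previously open-coded dev_priv->info.display_mmio_offset now goes through a DISPLAY_MMIO_BASE(dev_priv) wrapper. The wrapper itself is defined outside the hunks shown here; a minimal sketch of the shape such a helper typically takes (the field it reads is an assumption for illustration, not quoted from the patch):

	/* Hypothetical sketch: centralize the per-platform display MMIO base
	 * lookup so each register definition only adds its fixed offset. */
	#define DISPLAY_MMIO_BASE(dev_priv) \
		((dev_priv)->info.display_mmio_offset)

	#define BLC_HIST_CTL	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)

Keeping the lookup behind one macro means a platform that relocates its display registers only has to change the base, not every definition.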
index cefefc11d922636418294760681b8ade6a310a1d..d1355154886a3266661a9295df73a1192c5576ff 100644
@@ -111,99 +111,10 @@ i915_request_remove_from_client(struct i915_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       struct i915_timeline *timeline;
-       enum intel_engine_id id;
-       int ret;
-
-       /* Carefully retire all requests without writing to the rings */
-       ret = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED,
-                                    MAX_SCHEDULE_TIMEOUT);
-       if (ret)
-               return ret;
-
-       GEM_BUG_ON(i915->gt.active_requests);
-
-       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       for_each_engine(engine, i915, id) {
-               GEM_TRACE("%s seqno %d (current %d) -> %d\n",
-                         engine->name,
-                         engine->timeline.seqno,
-                         intel_engine_get_seqno(engine),
-                         seqno);
-
-               if (seqno == engine->timeline.seqno)
-                       continue;
-
-               kthread_park(engine->breadcrumbs.signaler);
-
-               if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
-                       /* Flush any waiters before we reuse the seqno */
-                       intel_engine_disarm_breadcrumbs(engine);
-                       intel_engine_init_hangcheck(engine);
-                       GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
-               }
-
-               /* Check we are idle before we fiddle with hw state! */
-               GEM_BUG_ON(!intel_engine_is_idle(engine));
-               GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
-               /* Finally reset hw state */
-               intel_engine_init_global_seqno(engine, seqno);
-               engine->timeline.seqno = seqno;
-
-               kthread_unpark(engine->breadcrumbs.signaler);
-       }
-
-       list_for_each_entry(timeline, &i915->gt.timelines, link)
-               memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
-       i915->gt.request_serial = seqno;
-
-       return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *i915 = to_i915(dev);
-
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we will inject to ring */
-       return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
-       int ret;
-
-       /*
-        * Reservation is fine until we may need to wrap around
-        *
-        * By incrementing the serial for every request, we know that no
-        * individual engine may exceed that serial (as each is reset to 0
-        * on any wrap). This protects even the most pessimistic of migrations
-        * of every request from all engines onto just one.
-        */
-       while (unlikely(++i915->gt.request_serial == 0)) {
-               ret = reset_all_global_seqno(i915, 0);
-               if (ret) {
-                       i915->gt.request_serial--;
-                       return ret;
-               }
-       }
-
        if (!i915->gt.active_requests++)
                i915_gem_unpark(i915);
-
-       return 0;
 }
 
 static void unreserve_gt(struct drm_i915_private *i915)
@@ -566,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+       struct i915_request *rq, *rn;
+
+       list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+               if (!i915_request_completed(rq))
+                       break;
+
+               i915_request_retire(rq);
+       }
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+       struct intel_ring *ring = ce->ring;
+       struct i915_request *rq;
+
+       if (list_empty(&ring->request_list))
+               goto out;
+
+       /* Ratelimit ourselves to prevent oom from malicious clients */
+       rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+       cond_synchronize_rcu(rq->rcustate);
+
+       /* Retire our old requests in the hope that we free some */
+       ring_retire_requests(ring);
+
+out:
+       return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -608,13 +551,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        if (IS_ERR(ce))
                return ERR_CAST(ce);
 
-       ret = reserve_gt(i915);
-       if (ret)
-               goto err_unpin;
-
-       ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-       if (ret)
-               goto err_unreserve;
+       reserve_gt(i915);
 
        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -654,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        rq = kmem_cache_alloc(i915->requests,
                              GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
-               i915_retire_requests(i915);
-
-               /* Ratelimit ourselves to prevent oom from malicious clients */
-               rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-                                        &i915->drm.struct_mutex);
-               if (rq)
-                       cond_synchronize_rcu(rq->rcustate);
-
-               rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+               rq = i915_request_alloc_slow(ce);
                if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
@@ -707,9 +636,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         * i915_request_add() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
+        *
+        * Note that due to how we add reserved_space to intel_ring_begin()
+        * we need to double our request to ensure that if we need to wrap
+        * around inside i915_request_add() there is sufficient space at
+        * the beginning of the ring as well.
         */
-       rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-       GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+       rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
 
        /*
         * Record the position of the start of the request so that
@@ -719,11 +652,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         */
        rq->head = rq->ring->emit;
 
-       /* Unconditionally invalidate GPU caches and TLBs. */
-       ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (ret)
-               goto err_unwind;
-
        ret = engine->request_alloc(rq);
        if (ret)
                goto err_unwind;
@@ -748,7 +676,6 @@ err_unwind:
        kmem_cache_free(i915->requests, rq);
 err_unreserve:
        unreserve_gt(i915);
-err_unpin:
        intel_context_unpin(ce);
        return ERR_PTR(ret);
 }
@@ -776,34 +703,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       I915_FENCE_GFP);
-               return ret < 0 ? ret : 0;
-       }
-
-       if (to->engine->semaphore.sync_to) {
-               u32 seqno;
-
-               GEM_BUG_ON(!from->engine->semaphore.signal);
-
-               seqno = i915_request_global_seqno(from);
-               if (!seqno)
-                       goto await_dma_fence;
-
-               if (seqno <= to->timeline->global_sync[from->engine->id])
-                       return 0;
-
-               trace_i915_gem_ring_sync_to(to, from);
-               ret = to->engine->semaphore.sync_to(to, from);
-               if (ret)
-                       return ret;
-
-               to->timeline->global_sync[from->engine->id] = seqno;
-               return 0;
+       } else {
+               ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                   &from->fence, 0,
+                                                   I915_FENCE_GFP);
        }
 
-await_dma_fence:
-       ret = i915_sw_fence_await_dma_fence(&to->submit,
-                                           &from->fence, 0,
-                                           I915_FENCE_GFP);
        return ret < 0 ? ret : 0;
 }
 
@@ -979,8 +884,8 @@ void i915_request_add(struct i915_request *request)
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
+       GEM_BUG_ON(request->reserved_space > request->ring->space);
        request->reserved_space = 0;
-       engine->emit_flush(request, EMIT_FLUSH);
 
        /*
         * Record the position of the start of the breadcrumb so that
@@ -1298,13 +1203,7 @@ restart:
                set_current_state(state);
 
 wakeup:
-               /*
-                * Carefully check if the request is complete, giving time
-                * for the seqno to be visible following the interrupt.
-                * We also have to check in case we are kicked by the GPU
-                * reset in order to drop the struct_mutex.
-                */
-               if (__i915_request_irq_complete(rq))
+               if (i915_request_completed(rq))
                        break;
 
                /*
@@ -1343,19 +1242,6 @@ complete:
        return timeout;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-       struct i915_request *request, *next;
-
-       list_for_each_entry_safe(request, next,
-                                &ring->request_list, ring_link) {
-               if (!i915_request_completed(request))
-                       break;
-
-               i915_request_retire(request);
-       }
-}
-
 void i915_retire_requests(struct drm_i915_private *i915)
 {
        struct intel_ring *ring, *tmp;
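Note on the i915_request.c hunks above: with the global-seqno reset path removed, reserve_gt() can no longer fail, and the allocation slow path now retires completed requests on the same ring before falling back to a blocking allocation. The reservation size likewise replaces MIN_SPACE_FOR_ADD_REQUEST with twice the breadcrumb size so that i915_request_add() still fits even if the ring has to wrap. A rough worked example (the dword count is assumed, not taken from any particular engine): with emit_breadcrumb_sz = 6 dwords,

	rq->reserved_space = 2 * 6 * sizeof(u32) = 48 bytes

i.e. room for one breadcrumb at the current tail plus a second copy at the head of the ring after a wrap.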
index 90e9d170a0cd5e00645842e3955df058e4a8b338..d014b060544569727cefa9c208a81bb4e7dbba20 100644
@@ -30,7 +30,6 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
-#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
index 8f3aa4dc0c98596a3b0443e493be84403d120bc3..f18afa2bac8d87efa40fe2187d45aff240462a8b 100644
@@ -24,7 +24,6 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "intel_drv.h"
 #include "i915_reg.h"
@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 
        i915_save_display(dev_priv);
 
-       if (IS_GEN4(dev_priv))
+       if (IS_GEN(dev_priv, 4))
                pci_read_config_word(pdev, GCDGMBUS,
                                     &dev_priv->regfile.saveGCDGMBUS);
 
@@ -77,14 +76,14 @@ int i915_save_state(struct drm_i915_private *dev_priv)
        dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
        /* Scratch space */
-       if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+       if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
                        dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
                        dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
                }
                for (i = 0; i < 3; i++)
                        dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                for (i = 0; i < 7; i++)
                        dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
        } else if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       if (IS_GEN4(dev_priv))
+       if (IS_GEN(dev_priv, 4))
                pci_write_config_word(pdev, GCDGMBUS,
                                      dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev_priv);
@@ -122,14 +121,14 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
        I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
        /* Scratch space */
-       if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+       if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
                        I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
                        I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
                }
                for (i = 0; i < 3; i++)
                        I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
-       } else if (IS_GEN2(dev_priv)) {
+       } else if (IS_GEN(dev_priv, 2)) {
                for (i = 0; i < 7; i++)
                        I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
        } else if (HAS_GMCH_DISPLAY(dev_priv)) {
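Note on the IS_GENx(dev_priv) -> IS_GEN(dev_priv, n) conversions above (and in the files that follow): the per-generation helpers are folded into a single parameterized test. A sketch of the idea, assuming the device info carries the supported generation as a bitmask; the field name and exact encoding are assumptions for illustration:

	/* Hypothetical sketch of a parameterized generation test. */
	#define IS_GEN(dev_priv, n) \
		(!!((dev_priv)->info.gen_mask & BIT((n) - 1)))

Call sites then read IS_GEN(dev_priv, 2), IS_GEN(dev_priv, 4) and so on, as in the hunks above.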
index 535caebd9813af5d82701b1d7e47b987563cd608..c0cfe7ae2ba5aecbe3daaf8bccce3eee362753a8 100644
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
        ssize_t ret;
 
        gpu = i915_first_error_state(i915);
-       if (gpu) {
+       if (IS_ERR(gpu)) {
+               ret = PTR_ERR(gpu);
+       } else if (gpu) {
                ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
                i915_gpu_state_put(gpu);
        } else {
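Note on the sysfs hunk above: i915_first_error_state() can now return an ERR_PTR-encoded error in addition to NULL or a valid pointer, so error_state_read() has to distinguish all three cases. A minimal reminder of the linux/err.h idiom involved (the lookup helper and error value are made up for illustration):

	#include <linux/err.h>

	void *p = some_lookup();	/* hypothetical helper */
	if (IS_ERR(p))
		return PTR_ERR(p);	/* encoded errno, e.g. -ENOMEM */
	if (!p)
		return 0;		/* nothing to report */
	/* otherwise p is a valid pointer and is released when done */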
index ebd71b487220aec95a2bfdeb1c924f0836ae64c1..38c1e15e927a82297f5f9eb43bebed85eccf062b 100644
@@ -63,14 +63,6 @@ struct i915_timeline {
         * redundant and we can discard it without loss of generality.
         */
        struct i915_syncmap *sync;
-       /**
-        * Separately to the inter-context seqno map above, we track the last
-        * barrier (e.g. semaphore wait) to the global engine timelines. Note
-        * that this tracks global_seqno rather than the context.seqno, and
-        * so it is subject to the limitations of hw wraparound and that we
-        * may need to revoke global_seqno (on pre-emption).
-        */
-       u32 global_sync[I915_NUM_ENGINES];
 
        struct list_head link;
        const char *name;
index b50c6b829715e220c9f3edede3dfa0e83497a804..33d90eca9cdd18e1422614348df8d24e07ccca6c 100644
@@ -6,7 +6,6 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-#include <drm/drmP.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -585,35 +584,6 @@ TRACE_EVENT(i915_gem_evict_vm,
            TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct i915_request *to, struct i915_request *from),
-           TP_ARGS(to, from),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, from_class)
-                            __field(u32, from_instance)
-                            __field(u32, to_class)
-                            __field(u32, to_instance)
-                            __field(u32, seqno)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = from->i915->drm.primary->index;
-                          __entry->from_class = from->engine->uabi_class;
-                          __entry->from_instance = from->engine->instance;
-                          __entry->to_class = to->engine->uabi_class;
-                          __entry->to_instance = to->engine->instance;
-                          __entry->seqno = from->global_seqno;
-                          ),
-
-           TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
-                     __entry->dev,
-                     __entry->from_class, __entry->from_instance,
-                     __entry->to_class, __entry->to_instance,
-                     __entry->seqno)
-);
-
 TRACE_EVENT(i915_request_queue,
            TP_PROTO(struct i915_request *rq, u32 flags),
            TP_ARGS(rq, flags),
index 6ba478e57b9bc51f9cebab5f6e00ad442ea8c13a..9d142d038a7d3631ac927280e549528e86756db5 100644
@@ -6,7 +6,6 @@
  */
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <drm/drmP.h>
 #include "i915_drv.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
index 8cb02f28d30cf6b12fe002c13c8bd9d17f9fa6f2..d8dbc9980281521973ceae05928430333e0c070e 100644
@@ -29,7 +29,6 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
        if (plane_state && plane_state->base.fb &&
            plane_state->base.fb->format->is_yuv &&
            plane_state->base.fb->format->num_planes > 1) {
-               if (IS_GEN9(dev_priv) &&
+               if (IS_GEN(dev_priv, 9) &&
                    !IS_GEMINILAKE(dev_priv)) {
                        mode = SKL_PS_SCALER_MODE_NV12;
                } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
index 0a73e6e65c2030b2f094c5a89f8248cb807a8020..683a75dad4fb4d7d52b89ca2b4cd132e593f42c3 100644
@@ -31,7 +31,6 @@
  * prepare/check/commit/cleanup steps.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
index ae55a6865d5cca98f8738cbf8db4cffe5039bfa2..202a58cf2d9f3016fa740062ab55cf2127b28515 100644
@@ -27,7 +27,6 @@
 #include <drm/intel_lpe_audio.h>
 #include "intel_drv.h"
 
-#include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include "i915_drv.h"
 
@@ -758,7 +757,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        u32 tmp;
 
-       if (!IS_GEN9(dev_priv))
+       if (!IS_GEN(dev_priv, 9))
                return;
 
        i915_audio_component_get_power(kdev);
index 6d3e0260d49cda5b2d5ffa36152ef6eb544cc14e..140c218128cbf6ee897684e66f7bc441318e7b09 100644
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
         * Only parse SDVO mappings on gens that could have SDVO. This isn't
         * accurate and doesn't have to be, as long as it's not too strict.
         */
-       if (!IS_GEN(dev_priv, 3, 7)) {
+       if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
                DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
                return;
        }
@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        info->supports_dp = is_dp;
        info->supports_edp = is_edp;
 
-       DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
-                     port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+       if (bdb_version >= 195)
+               info->supports_typec_usb = child->dp_usb_type_c;
+
+       if (bdb_version >= 209)
+               info->supports_tbt = child->tbt;
+
+       DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+                     port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+                     info->supports_typec_usb, info->supports_tbt);
 
        if (is_edp && is_dvi)
                DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
index 447c5256f63a9399f39f2439a558e3b88e4205ae..4ed7105d7ff56f80cdde1675e0f98d3615c4a769 100644
@@ -166,12 +166,6 @@ static void irq_enable(struct intel_engine_cs *engine)
         */
        GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
 
-       /* Enabling the IRQ may miss the generation of the interrupt, but
-        * we still need to force the barrier before reading the seqno,
-        * just in case.
-        */
-       set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
        /* Caller disables interrupts */
        if (engine->irq_enable) {
                spin_lock(&engine->i915->irq_lock);
@@ -683,16 +677,6 @@ static int intel_breadcrumbs_signaler(void *arg)
                }
 
                if (unlikely(do_schedule)) {
-                       /* Before we sleep, check for a missed seqno */
-                       if (current->state & TASK_NORMAL &&
-                           !list_empty(&b->signals) &&
-                           engine->irq_seqno_barrier &&
-                           test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
-                                              &engine->irq_posted)) {
-                               engine->irq_seqno_barrier(engine);
-                               intel_engine_wakeup(engine);
-                       }
-
 sleep:
                        if (kthread_should_park())
                                kthread_parkme();
@@ -859,16 +843,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
        else
                irq_disable(engine);
 
-       /*
-        * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
-        * GPU is active and may have already executed the MI_USER_INTERRUPT
-        * before the CPU is ready to receive. However, the engine is currently
-        * idle (we haven't started it yet), there is no possibility for a
-        * missed interrupt as we enabled the irq and so we can clear the
-        * immediate wakeup (until a real interrupt arrives for the waiter).
-        */
-       clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
        spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
index 25e3aba9cded6e45f3751039a3f7bb20845dd7b8..2021e484a2872dfeec109d580ee3fc9f26070577 100644
@@ -2140,7 +2140,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
 {
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return DIV_ROUND_UP(pixel_rate, 2);
-       else if (IS_GEN9(dev_priv) ||
+       else if (IS_GEN(dev_priv, 9) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return pixel_rate;
        else if (IS_CHERRYVIEW(dev_priv))
@@ -2176,7 +2176,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
                if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
                        /* Display WA #1145: glk,cnl */
                        min_cdclk = max(316800, min_cdclk);
-               } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+               } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
                        /* Display WA #1144: skl,bxt */
                        min_cdclk = max(432000, min_cdclk);
                }
@@ -2537,7 +2537,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return 2 * max_cdclk_freq;
-       else if (IS_GEN9(dev_priv) ||
+       else if (IS_GEN(dev_priv, 9) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return max_cdclk_freq;
        else if (IS_CHERRYVIEW(dev_priv))
@@ -2785,9 +2785,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.get_cdclk = hsw_get_cdclk;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display.get_cdclk = vlv_get_cdclk;
-       else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-       else if (IS_GEN5(dev_priv))
+       else if (IS_GEN(dev_priv, 5))
                dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
        else if (IS_GM45(dev_priv))
                dev_priv->display.get_cdclk = gm45_get_cdclk;
index 5127da286a2b4f61ca5a32ce00220e01a9397f93..37fd9ddf762e94f5f406117ac3b68631372c1c42 100644
 #define ILK_CSC_COEFF_1_0              \
        ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(struct drm_property_blob *lut)
 {
-       return !state->degamma_lut &&
-               !state->ctm &&
-               state->gamma_lut &&
-               drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+       return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(struct intel_crtc_state *crtc_state)
+{
+       return !crtc_state->base.degamma_lut &&
+               !crtc_state->base.ctm &&
+               crtc_state->base.gamma_lut &&
+               lut_is_legacy(crtc_state->base.gamma_lut);
 }
 
 /*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
        return result;
 }
 
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
 {
-       int pipe = intel_crtc->pipe;
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       int pipe = crtc->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
        I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
        I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,14 +137,12 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
        I915_WRITE(PIPE_CSC_MODE(pipe), 0);
 }
 
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = crtc_state->crtc;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int i, pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int i, pipe = crtc->pipe;
        uint16_t coeffs[9] = { 0, };
-       struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
        bool limited_color_range = false;
 
        /*
@@ -147,14 +150,14 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
         * do the range compression using the gamma LUT instead.
         */
        if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
-               limited_color_range = intel_crtc_state->limited_color_range;
+               limited_color_range = crtc_state->limited_color_range;
 
-       if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-           intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
-               ilk_load_ycbcr_conversion_matrix(intel_crtc);
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+           crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+               ilk_load_ycbcr_conversion_matrix(crtc);
                return;
-       } else if (crtc_state->ctm) {
-               struct drm_color_ctm *ctm = crtc_state->ctm->data;
+       } else if (crtc_state->base.ctm) {
+               struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
                const u64 *input;
                u64 temp[9];
 
@@ -253,16 +256,15 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 /*
  * Set up the pipe CSC unit on CherryView.
  */
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = to_intel_crtc(crtc)->pipe;
+       int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
        uint32_t mode;
 
-       if (state->ctm) {
-               struct drm_color_ctm *ctm = state->ctm->data;
+       if (crtc_state->base.ctm) {
+               struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
                uint16_t coeffs[9] = { 0, };
                int i;
 
@@ -293,17 +295,17 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
                I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
        }
 
-       mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-       if (!crtc_state_is_legacy_gamma(state)) {
-               mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-                       (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+       mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+       if (!crtc_state_is_legacy_gamma(crtc_state)) {
+               mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+                       (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
        }
        I915_WRITE(CGM_PIPE_MODE(pipe), mode);
 }
 
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+void intel_color_set_csc(struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc_state->crtc->dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->display.load_csc_matrix)
@@ -311,14 +313,12 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-                                   struct drm_property_blob *blob,
-                                   struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
+                                   struct drm_property_blob *blob)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        int i;
 
        if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -353,53 +353,48 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        }
 }
 
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(struct intel_crtc_state *crtc_state)
 {
-       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
-                               to_intel_crtc_state(crtc_state));
+       i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void haswell_load_luts(struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = crtc_state->crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *intel_crtc_state =
-               to_intel_crtc_state(crtc_state);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool reenable_ips = false;
 
        /*
         * Workaround : Do not read or write the pipe palette/gamma data while
         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
         */
-       if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
-           (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
-               hsw_disable_ips(intel_crtc_state);
+       if (IS_HASWELL(dev_priv) && crtc_state->ips_enabled &&
+           (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+               hsw_disable_ips(crtc_state);
                reenable_ips = true;
        }
 
-       intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
-       I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+       crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+       I915_WRITE(GAMMA_MODE(crtc->pipe), GAMMA_MODE_MODE_8BIT);
 
        i9xx_load_luts(crtc_state);
 
        if (reenable_ips)
-               hsw_enable_ips(intel_crtc_state);
+               hsw_enable_ips(crtc_state);
 }
 
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
        uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 
        I915_WRITE(PREC_PAL_INDEX(pipe),
                   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
 
-       if (state->degamma_lut) {
-               struct drm_color_lut *lut = state->degamma_lut->data;
+       if (crtc_state->base.degamma_lut) {
+               struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
 
                for (i = 0; i < lut_size; i++) {
                        uint32_t word =
@@ -419,10 +414,10 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
        }
 }
 
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
        uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 
        WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@@ -432,8 +427,8 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                   PAL_PREC_AUTO_INCREMENT |
                   offset);
 
-       if (state->gamma_lut) {
-               struct drm_color_lut *lut = state->gamma_lut->data;
+       if (crtc_state->base.gamma_lut) {
+               struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
 
                for (i = 0; i < lut_size; i++) {
                        uint32_t word =
@@ -467,22 +462,21 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-       if (crtc_state_is_legacy_gamma(state)) {
-               haswell_load_luts(state);
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               haswell_load_luts(crtc_state);
                return;
        }
 
-       bdw_load_degamma_lut(state);
-       bdw_load_gamma_lut(state,
+       bdw_load_degamma_lut(crtc_state);
+       bdw_load_gamma_lut(crtc_state,
                           INTEL_INFO(dev_priv)->color.degamma_lut_size);
 
-       intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+       crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
        I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
        POSTING_READ(GAMMA_MODE(pipe));
 
@@ -493,10 +487,10 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
        I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 }
 
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-       enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
        const uint32_t lut_size = 33;
        uint32_t i;
 
@@ -523,49 +517,46 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
                I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
 }
 
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
+       enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-       glk_load_degamma_lut(state);
+       glk_load_degamma_lut(crtc_state);
 
-       if (crtc_state_is_legacy_gamma(state)) {
-               haswell_load_luts(state);
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               haswell_load_luts(crtc_state);
                return;
        }
 
-       bdw_load_gamma_lut(state, 0);
+       bdw_load_gamma_lut(crtc_state, 0);
 
-       intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+       crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
        I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
        POSTING_READ(GAMMA_MODE(pipe));
 }
 
 /* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
 {
-       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc *crtc = crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        struct drm_color_lut *lut;
        uint32_t i, lut_size;
        uint32_t word0, word1;
 
-       if (crtc_state_is_legacy_gamma(state)) {
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
-                          (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-               i9xx_load_luts_internal(crtc, state->gamma_lut,
-                                       to_intel_crtc_state(state));
+                          (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0));
+               i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
                return;
        }
 
-       if (state->degamma_lut) {
-               lut = state->degamma_lut->data;
+       if (crtc_state->base.degamma_lut) {
+               lut = crtc_state->base.degamma_lut->data;
                lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
                for (i = 0; i < lut_size; i++) {
                        /* Write LUT in U0.14 format. */
@@ -579,8 +570,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
                }
        }
 
-       if (state->gamma_lut) {
-               lut = state->gamma_lut->data;
+       if (crtc_state->base.gamma_lut) {
+               lut = crtc_state->base.gamma_lut->data;
                lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
                for (i = 0; i < lut_size; i++) {
                        /* Write LUT in U0.10 format. */
@@ -595,29 +586,28 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
        }
 
        I915_WRITE(CGM_PIPE_MODE(pipe),
-                  (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
-                  (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-                  (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+                  (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0) |
+                  (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+                  (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
 
        /*
         * Also program a linear LUT in the legacy block (behind the
         * CGM block).
         */
-       i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+       i9xx_load_luts_internal(crtc_state, NULL);
 }
 
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc_state->crtc->dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->display.load_luts(crtc_state);
 }
 
-int intel_color_check(struct drm_crtc *crtc,
-                     struct drm_crtc_state *crtc_state)
+int intel_color_check(struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        size_t gamma_length, degamma_length;
 
        degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -627,10 +617,10 @@ int intel_color_check(struct drm_crtc *crtc,
         * We allow both degamma & gamma luts at the right size or
         * NULL.
         */
-       if ((!crtc_state->degamma_lut ||
-            drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
-           (!crtc_state->gamma_lut ||
-            drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+       if ((!crtc_state->base.degamma_lut ||
+            drm_color_lut_size(crtc_state->base.degamma_lut) == degamma_length) &&
+           (!crtc_state->base.gamma_lut ||
+            drm_color_lut_size(crtc_state->base.gamma_lut) == gamma_length))
                return 0;
 
        /*
@@ -643,11 +633,11 @@ int intel_color_check(struct drm_crtc *crtc,
        return -EINVAL;
 }
 
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       drm_mode_crtc_set_gamma_size(crtc, 256);
+       drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
        if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
@@ -669,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
        /* Enable color management support when we have degamma & gamma LUTs. */
        if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
            INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
-               drm_crtc_enable_color_mgmt(crtc,
+               drm_crtc_enable_color_mgmt(&crtc->base,
                                           INTEL_INFO(dev_priv)->color.degamma_lut_size,
                                           true,
                                           INTEL_INFO(dev_priv)->color.gamma_lut_size);
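Note on the intel_color.c hunks above: the color-management helpers are converted from taking the core struct drm_crtc_state to taking the driver's struct intel_crtc_state, reaching the core state through the embedded base member instead of casting with to_intel_crtc_state() at every use. Reduced to a schematic (function bodies trimmed; names other than base are placeholders):

	/* before: cast on every access */
	static void load_luts(struct drm_crtc_state *state)
	{
		struct intel_crtc_state *s = to_intel_crtc_state(state);
		enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
		/* ... */
	}

	/* after: take the driver state, reach the core state via ->base */
	static void load_luts(struct intel_crtc_state *crtc_state)
	{
		enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
		/* ... */
	}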
index 37d2c644f4b8b4d6fd441535bedc5ae729db0841..ee16758747c5d1af85d3251442993edf85eb0685 100644
@@ -27,7 +27,6 @@
 #include <linux/i2c.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
index 2e0fd9927db254faffd16a1ef6be655b2fe03b66..e73458f693a502ae2cb502424c18527206dfd0e3 100644
@@ -27,7 +27,6 @@
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -322,7 +321,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
                 * DAC limit supposedly 355 MHz.
                 */
                max_clock = 270000;
-       else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+       else if (IS_GEN_RANGE(dev_priv, 3, 4))
                max_clock = 400000;
        else
                max_clock = 350000;
@@ -667,7 +666,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
        /* Set the border color to purple. */
        I915_WRITE(bclrpat_reg, 0x500050);
 
-       if (!IS_GEN2(dev_priv)) {
+       if (!IS_GEN(dev_priv, 2)) {
                uint32_t pipeconf = I915_READ(pipeconf_reg);
                I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
                POSTING_READ(pipeconf_reg);
@@ -982,7 +981,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
        else
                crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-       if (IS_GEN2(dev_priv))
+       if (IS_GEN(dev_priv, 2))
                connector->interlace_allowed = 0;
        else
                connector->interlace_allowed = 1;
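Note on the IS_GEN_RANGE(dev_priv, 3, 4) check above: it folds the former pair of single-generation tests into one inclusive range test. A sketch of what such a helper can look like, reusing the assumed gen_mask layout from the IS_GEN sketch earlier (all names and the exact mask construction are assumptions for illustration):

	/* Hypothetical sketch: true for any generation in [s, e]. */
	#define GEN_RANGE_MASK(s, e)	GENMASK((e) - 1, (s) - 1)
	#define IS_GEN_RANGE(dev_priv, s, e) \
		(!!((dev_priv)->info.gen_mask & GEN_RANGE_MASK(s, e)))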
index fd06d1fd39d3236300c42d6e43244f7b91ca15fc..b1ac89b514c1676d60554613bde56d28c2b53b72 100644
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
        { 0x2, 0x7F, 0x3F, 0x00, 0x00 },        /* 400   400      0.0   */
 };
 
-struct icl_combo_phy_ddi_buf_trans {
-       u32 dw2_swing_select;
-       u32 dw2_swing_scalar;
-       u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0xB, 0x70, 0x0018 },  /* 600         0.0   */
-       { 0xB, 0x70, 0x3015 },  /* 600         3.5   */
-       { 0xB, 0x70, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x00, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x00, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-       { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-       { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x76, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x76, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+       { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+       { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+       { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+       { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+       { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0x0, 0x7F, 0x3F, 0x00, 0x00 },        /* 200   200      0.0   */
+       { 0x8, 0x7F, 0x38, 0x00, 0x07 },        /* 200   250      1.9   */
+       { 0x1, 0x7F, 0x33, 0x00, 0x0C },        /* 200   300      3.5   */
+       { 0x9, 0x7F, 0x31, 0x00, 0x0E },        /* 200   350      4.9   */
+       { 0x8, 0x7F, 0x3F, 0x00, 0x00 },        /* 250   250      0.0   */
+       { 0x1, 0x7F, 0x38, 0x00, 0x07 },        /* 250   300      1.6   */
+       { 0x9, 0x7F, 0x35, 0x00, 0x0A },        /* 250   350      2.9   */