Merge tag 'drm-intel-gt-next-2022-09-16' of git://anongit.freedesktop.org/drm/drm...
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3a95fe7d4e3306e7c1d8ad079596156ab18b1fe1..9e2f781c6ed527b779ac5945d1c3c36a188caf65 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
  */
 
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
+#include <linux/delay.h>
 
 #include "mei_dev.h"
 #include "hbm.h"
@@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev)
  */
 static void mei_me_intr_enable(struct mei_device *dev)
 {
-       u32 hcsr = mei_hcsr_read(dev);
+       u32 hcsr;
+
+       if (mei_me_hw_use_polling(to_me_hw(dev)))
+               return;
 
-       hcsr |= H_CSR_IE_MASK;
+       hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
 }
 
@@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev)
 {
        struct mei_me_hw *hw = to_me_hw(dev);
 
+       if (mei_me_hw_use_polling(hw))
+               return;
+
        synchronize_irq(hw->irq);
 }
 
@@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev)
 {
        u32 hcsr = mei_hcsr_read(dev);
 
-       hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
+       if (!mei_me_hw_use_polling(to_me_hw(dev)))
+               hcsr |= H_CSR_IE_MASK;
+
+       hcsr |= H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
 }
 
@@ -423,6 +433,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev)
        return (mecsr & ME_RST_HRA) == ME_RST_HRA;
 }
 
+/**
+ * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
+ *
+ * @dev: the device structure
+ */
+static void mei_gsc_pxp_check(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 fwsts5 = 0;
+
+       if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+               return;
+
+       hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
+       trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+       if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
+               dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+               dev->pxp_mode = MEI_DEV_PXP_READY;
+       } else {
+               dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+       }
+}
+
 /**
  * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
  *  or timeout is reached
@@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                        dev->recvd_hw_ready,
-                       mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+                       dev->timeouts.hw_ready);
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }
 
+       mei_gsc_pxp_check(dev);
+
        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
@@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev)
 static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
 {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
 
        dev->pg_event = MEI_PG_EVENT_WAIT;
@@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
 
        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
@@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
 static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
 {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
 
        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
@@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
 
 reply:
@@ -762,7 +797,8 @@ reply:
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
 
        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
@@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev)
 static int mei_me_d0i3_enter_sync(struct mei_device *dev)
 {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
-       unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;
 
@@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
 
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
@@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);
 
        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -980,7 +1016,6 @@ on:
 static int mei_me_d0i3_exit_sync(struct mei_device *dev)
 {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;
 
@@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);
 
        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 
        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
 
-       if (!intr_enable)
+       if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
                hcsr &= ~H_CSR_IE_MASK;
 
        dev->recvd_hw_ready = false;
@@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 
        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
-               dev_warn(dev->dev, "FW not ready: resetting.\n");
+               dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
+                        dev->dev_state, dev->pxp_mode);
                if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
                    dev->dev_state == MEI_DEV_POWER_DOWN)
                        mei_cl_all_disconnect(dev);
@@ -1331,6 +1368,66 @@ end:
 }
 EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
 
+#define MEI_POLLING_TIMEOUT_ACTIVE 100
+#define MEI_POLLING_TIMEOUT_IDLE   500
+
+/**
+ * mei_me_polling_thread - interrupt register polling thread
+ *
+ * The thread monitors the interrupt source register and calls
+ * mei_me_irq_thread_handler() to handle the firmware
+ * input.
+ *
+ * The thread polls with a MEI_POLLING_TIMEOUT_ACTIVE interval
+ * as long as events are arriving; when idle, the polling
+ * interval grows by MEI_POLLING_TIMEOUT_ACTIVE on each
+ * iteration, up to MEI_POLLING_TIMEOUT_IDLE.
+ *
+ * @_dev: mei device
+ *
+ * Return: always 0
+ */
+int mei_me_polling_thread(void *_dev)
+{
+       struct mei_device *dev = _dev;
+       irqreturn_t irq_ret;
+       long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+
+       dev_dbg(dev->dev, "kernel thread is running\n");
+       while (!kthread_should_stop()) {
+               struct mei_me_hw *hw = to_me_hw(dev);
+               u32 hcsr;
+
+               wait_event_timeout(hw->wait_active,
+                                  hw->is_active || kthread_should_stop(),
+                                  msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
+
+               if (kthread_should_stop())
+                       break;
+
+               hcsr = mei_hcsr_read(dev);
+               if (me_intr_src(hcsr)) {
+                       polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+                       irq_ret = mei_me_irq_thread_handler(1, dev);
+                       if (irq_ret != IRQ_HANDLED)
+                               dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+               } else {
+                       /*
+                        * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
+                        * up to MEI_POLLING_TIMEOUT_IDLE
+                        */
+                       polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
+                                                   MEI_POLLING_TIMEOUT_ACTIVE,
+                                                   MEI_POLLING_TIMEOUT_IDLE);
+               }
+
+               schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mei_me_polling_thread);
+
 static const struct mei_hw_ops mei_me_hw_ops = {
 
        .trc_status = mei_me_trc_status,
@@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg);
  *
  * @parent: device associated with physical device (pci/platform)
  * @cfg: per device generation config
+ * @slow_fw: configure longer timeouts as the FW is slow
  *
  * Return: The mei_device pointer on success, NULL on failure.
  */
 struct mei_device *mei_me_dev_init(struct device *parent,
-                                  const struct mei_cfg *cfg)
+                                  const struct mei_cfg *cfg, bool slow_fw)
 {
        struct mei_device *dev;
        struct mei_me_hw *hw;
@@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
        for (i = 0; i < DMA_DSCR_NUM; i++)
                dev->dr_dscr[i].size = cfg->dma_size[i];
 
-       mei_device_init(dev, parent, &mei_me_hw_ops);
+       mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
        hw->cfg = cfg;
 
        dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
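
For context, the polling loop above only does any work if whoever creates the mei_device also spawns the kthread: with the growth rule in mei_me_polling_thread(), an idle device ends up being polled at 100, 200, 300, 400 and then 500 ms intervals, and any serviced event drops the interval back to 100 ms. Below is a minimal sketch of how a transport driver might start and stop the thread with the standard kthread API; the helper names and the hw->polling_thread bookkeeping pointer are assumptions for illustration, while mei_me_polling_thread(), hw->wait_active and hw->is_active come from the code above.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/wait.h>

#include "mei_dev.h"	/* struct mei_device */
#include "hw-me.h"	/* struct mei_me_hw, to_me_hw(), mei_me_polling_thread() */

/* Hypothetical helpers, not part of this patch.  hw->polling_thread is an
 * assumed task_struct pointer kept by the caller; wait_active and is_active
 * are the fields the polling thread itself waits on.
 */
static int example_start_polling(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	struct task_struct *task;

	init_waitqueue_head(&hw->wait_active);
	hw->is_active = true;	/* let the thread poll right away */

	task = kthread_run(mei_me_polling_thread, dev, "mei-polling");
	if (IS_ERR(task))
		return PTR_ERR(task);

	hw->polling_thread = task;
	return 0;
}

static void example_stop_polling(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* kthread_should_stop() inside the loop makes this a clean exit */
	kthread_stop(hw->polling_thread);
}

On runtime suspend the owner would clear hw->is_active, and on resume set it again and call wake_up(&hw->wait_active), so the wait_event_timeout() at the top of the loop reacts without sleeping out the full 500 ms idle timeout.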