crypto: inside-secure - irq balance
authorSven Auhagen <Sven.Auhagen@voleatech.de>
Tue, 21 Jul 2020 04:37:59 +0000 (06:37 +0200)
committerHerbert Xu <herbert@gondor.apana.org.au>
Fri, 31 Jul 2020 08:08:59 +0000 (18:08 +1000)
Balance the IRQs of the inside-secure driver over all
available CPUs.
Previously, all interrupts were handled by the first CPU.

From my testing with IPsec AES-GCM-256
on my MCbin with 4 cores, I get a 50% speed increase:

Before the patch: 99.73 Kpps
With the patch: 151.25 Kpps

Signed-off-by: Sven Auhagen <sven.auhagen@voleatech.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel.h

index 2cb53fbae841dc29a4f8016f5e894467392487a7..fa7398e688587daebc323f56a455f2c5087d4d96 100644 (file)
@@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
 
 static int safexcel_request_ring_irq(void *pdev, int irqid,
                                     int is_pci_dev,
+                                    int ring_id,
                                     irq_handler_t handler,
                                     irq_handler_t threaded_handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
 {
-       int ret, irq;
+       int ret, irq, cpu;
        struct device *dev;
 
        if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
                return ret;
        }
 
+       /* Set affinity */
+       cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+       irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
        return irq;
 }
 
@@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev,
                irq = safexcel_request_ring_irq(pdev,
                                                EIP197_IRQ_NUMBER(i, is_pci_dev),
                                                is_pci_dev,
+                                               i,
                                                safexcel_irq_ring,
                                                safexcel_irq_ring_thread,
                                                ring_irq);
@@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev,
                        return irq;
                }
 
+               priv->ring[i].irq = irq;
                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
                INIT_WORK(&priv->ring[i].work_data.work,
@@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev)
        clk_disable_unprepare(priv->reg_clk);
        clk_disable_unprepare(priv->clk);
 
-       for (i = 0; i < priv->config.rings; i++)
+       for (i = 0; i < priv->config.rings; i++) {
+               irq_set_affinity_hint(priv->ring[i].irq, NULL);
                destroy_workqueue(priv->ring[i].workqueue);
+       }
 
        return 0;
 }
index 94016c505abb87e25cd5ddb765f64f3115237b89..7c5fe382d2720e2a07e16a7543ae96e55f9eb3c0 100644 (file)
@@ -707,6 +707,9 @@ struct safexcel_ring {
         */
        struct crypto_async_request *req;
        struct crypto_async_request *backlog;
+
+       /* irq of this ring */
+       int irq;
 };
 
 /* EIP integration context flags */