irqchip/gic-v3-its: Move pending table allocation to init time
author     Marc Zyngier <marc.zyngier@arm.com>
           Fri, 27 Jul 2018 12:38:54 +0000 (13:38 +0100)
committer  Marc Zyngier <marc.zyngier@arm.com>
           Tue, 2 Oct 2018 09:37:30 +0000 (10:37 +0100)
Pending tables for the redistributors are currently allocated
one at a time as each CPU boots. This is causing some grief
for Linux/RT (allocation from within a CPU hotplug notifier is
frowned upon).

Let's move this allocation to take place at init time, when we
only have a single CPU. It means we're allocating memory for CPUs
that are not online yet, but most systems will boot all of their
CPUs anyway, so that's not completely wasted.

Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Bhupesh Sharma <bhsharma@redhat.com>
Tested-by: Lei Zhang <zhang.lei@jp.fujitsu.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
drivers/irqchip/irq-gic-v3-its.c
include/linux/irqchip/arm-gic-v3.h
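For readers unfamiliar with the driver, the core of the change is easiest to
see in isolation: instead of allocating each redistributor's pending table
from the CPU-online path, everything is allocated up front, once, for every
possible CPU. The snippet below is a minimal standalone sketch of that
pattern only, not the driver code itself; struct my_rdist, my_rdists and
my_alloc_tables are hypothetical names used for illustration.

	/*
	 * Minimal sketch of the init-time, per-possible-CPU allocation
	 * pattern. Hypothetical names; not the GICv3 ITS driver code.
	 */
	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/percpu.h>

	struct my_rdist {
		struct page	*pend_page;
		bool		lpi_enabled;
	};

	static struct my_rdist __percpu *my_rdists;

	static int __init my_alloc_tables(void)
	{
		int cpu;

		my_rdists = alloc_percpu(struct my_rdist);
		if (!my_rdists)
			return -ENOMEM;

		/*
		 * Allocate for every possible CPU while only the boot CPU
		 * is running, so nothing has to be allocated from the
		 * hotplug path later on.
		 */
		for_each_possible_cpu(cpu) {
			struct page *pend_page;

			pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, 0);
			if (!pend_page)
				return -ENOMEM;

			per_cpu_ptr(my_rdists, cpu)->pend_page = pend_page;
		}

		return 0;
	}

This is the shape the patch gives to allocate_lpi_tables() below, with the
actual table sizes and the existing its_allocate_pending_table() helper.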

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cb59a4d513c9032e043b2940102c2364e4ad2c77..02196682821bd22815c6f060daf06249d27f52da 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -173,6 +173,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
 static DEFINE_IDA(its_vpeid_ida);
 
 #define gic_data_rdist()               (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu)                (per_cpu_ptr(gic_rdists->rdist, cpu))
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()     (gic_data_rdist_rd_base() + SZ_128K)
 
@@ -1625,7 +1626,7 @@ static void its_free_prop_table(struct page *prop_page)
                   get_order(LPI_PROPBASE_SZ));
 }
 
-static int __init its_alloc_lpi_tables(void)
+static int __init its_setup_lpi_prop_table(void)
 {
        phys_addr_t paddr;
 
@@ -1944,30 +1945,47 @@ static void its_free_pending_table(struct page *pt)
        free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
 }
 
-static void its_cpu_init_lpis(void)
+static int __init allocate_lpi_tables(void)
 {
-       void __iomem *rbase = gic_data_rdist_rd_base();
-       struct page *pend_page;
-       u64 val, tmp;
+       int err, cpu;
 
-       /* If we didn't allocate the pending table yet, do it now */
-       pend_page = gic_data_rdist()->pend_page;
-       if (!pend_page) {
-               phys_addr_t paddr;
+       err = its_setup_lpi_prop_table();
+       if (err)
+               return err;
+
+       /*
+        * We allocate all the pending tables anyway, as we may have a
+        * mix of RDs that have had LPIs enabled, and some that
+        * don't. We'll free the unused ones as each CPU comes online.
+        */
+       for_each_possible_cpu(cpu) {
+               struct page *pend_page;
 
                pend_page = its_allocate_pending_table(GFP_NOWAIT);
                if (!pend_page) {
-                       pr_err("Failed to allocate PENDBASE for CPU%d\n",
-                              smp_processor_id());
-                       return;
+                       pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+                       return -ENOMEM;
                }
 
-               paddr = page_to_phys(pend_page);
-               pr_info("CPU%d: using LPI pending table @%pa\n",
-                       smp_processor_id(), &paddr);
-               gic_data_rdist()->pend_page = pend_page;
+               gic_data_rdist_cpu(cpu)->pend_page = pend_page;
        }
 
+       return 0;
+}
+
+static void its_cpu_init_lpis(void)
+{
+       void __iomem *rbase = gic_data_rdist_rd_base();
+       struct page *pend_page;
+       phys_addr_t paddr;
+       u64 val, tmp;
+
+       if (gic_data_rdist()->lpi_enabled)
+               return;
+
+       pend_page = gic_data_rdist()->pend_page;
+       paddr = page_to_phys(pend_page);
+
        /* set PROPBASE */
        val = (page_to_phys(gic_rdists->prop_page) |
               GICR_PROPBASER_InnerShareable |
@@ -2019,6 +2037,10 @@ static void its_cpu_init_lpis(void)
 
        /* Make sure the GIC has seen the above */
        dsb(sy);
+       gic_data_rdist()->lpi_enabled = true;
+       pr_info("GICv3: CPU%d: using LPI pending table @%pa\n",
+               smp_processor_id(),
+               &paddr);
 }
 
 static void its_cpu_init_collection(struct its_node *its)
@@ -3497,16 +3519,6 @@ static int redist_disable_lpis(void)
        u64 timeout = USEC_PER_SEC;
        u64 val;
 
-       /*
-        * If coming via a CPU hotplug event, we don't need to disable
-        * LPIs before trying to re-enable them. They are already
-        * configured and all is well in the world. Detect this case
-        * by checking the allocation of the pending table for the
-        * current CPU.
-        */
-       if (gic_data_rdist()->pend_page)
-               return 0;
-
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                return -ENXIO;
@@ -3516,7 +3528,18 @@ static int redist_disable_lpis(void)
        if (!(val & GICR_CTLR_ENABLE_LPIS))
                return 0;
 
-       pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world.
+        */
+       if (gic_data_rdist()->lpi_enabled)
+               return 0;
+
+       /*
+        * From that point on, we only try to do some damage control.
+        */
+       pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
                smp_processor_id());
        add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
 
@@ -3772,7 +3795,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
        }
 
        gic_rdists = rdists;
-       err = its_alloc_lpi_tables();
+
+       err = allocate_lpi_tables();
        if (err)
                return err;
 
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f29494b494b47dadc8d57943eeb4011612..266093e845bb66e1da5af0dfaba245947f214fb9 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -585,6 +585,7 @@ struct rdists {
                void __iomem    *rd_base;
                struct page     *pend_page;
                phys_addr_t     phys_base;
+               bool            lpi_enabled;
        } __percpu              *rdist;
        struct page             *prop_page;
        u64                     flags;
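The CPU-online side of the same pattern, again as a simplified sketch rather
than the actual driver code: the hotplug path no longer allocates anything,
it only programs the redistributor from the pre-allocated table and uses the
new lpi_enabled flag to stay idempotent when a CPU goes offline and comes
back. my_cpu_init builds on the hypothetical my_rdists from the sketch above
and assumes the usual kernel headers (<linux/smp.h>, <linux/printk.h>,
<linux/io.h> for page_to_phys).

	/*
	 * Hypothetical counterpart for the CPU-online path. The register
	 * programming is elided; only the control flow that mirrors the
	 * patch (pre-allocated table + idempotence flag) is shown.
	 */
	static void my_cpu_init(void)
	{
		struct my_rdist *rd = this_cpu_ptr(my_rdists);
		phys_addr_t paddr;

		/* Already set up, e.g. the CPU went offline and came back */
		if (rd->lpi_enabled)
			return;

		paddr = page_to_phys(rd->pend_page);

		/* ... program GICR_PROPBASER/GICR_PENDBASER, enable LPIs ... */

		rd->lpi_enabled = true;
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
	}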