powerpc/powernv: Move npu struct from pnv_phb to pci_controller
author Alexey Kardashevskiy <aik@ozlabs.ru>
Wed, 19 Dec 2018 08:52:16 +0000 (19:52 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Fri, 21 Dec 2018 05:20:46 +0000 (16:20 +1100)
The powernv PCI code stores NPU data in the pnv_phb struct, which is
referenced by pci_controller::private_data. We are going to have NPU2
support in the pseries platform as well, but pseries does not store any
private_data in the pci_controller struct; and even if it did, it would
be a different data structure.

This makes npu a pointer, allocates it at runtime in pnv_npu2_init(),
and stores it one level higher, in the pci_controller struct.
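
With the descriptor hung off the generic pci_controller, callers no
longer need to go through the powernv-specific pnv_phb. Below is a
minimal sketch of the resulting lookup; the helper name npu_from_npdev
is illustrative only (the patch open-codes this at each call site),
while pci_bus_to_host() and struct pci_controller are existing kernel
APIs:

	/* Sketch: NPU descriptor lookup after this change. */
	static struct npu *npu_from_npdev(struct pci_dev *npdev)
	{
		struct pci_controller *hose = pci_bus_to_host(npdev->bus);

		/* Expected to be NULL on controllers without an NPU. */
		return hose->npu;
	}

Before the change, the same lookup had to dereference private_data and
assume it pointed to a pnv_phb:

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;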

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.h

index 94d449031b181c89a00b34360fa49203fdc69e54..aee4fcc2499021b7ac268fcfc13ae33b61bf225c 100644
@@ -129,6 +129,7 @@ struct pci_controller {
 #endif /* CONFIG_PPC64 */
 
        void *private_data;
+       struct npu *npu;
 };
 
 /* These are used for config access before all the PCI probing
index 00f1eff807b022778874cacb713234034177fc37..0bc8b6912eba53532850b8f27c984926e3a5eeae 100644
@@ -326,6 +326,25 @@ struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
        return gpe;
 }
 
+/*
+ * NPU2 ATS
+ */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
+       int index;
+       __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+       unsigned int mmio_atsd_count;
+
+       /* Bitmask for MMIO register usage */
+       unsigned long mmio_atsd_usage;
+
+       /* Do we need to explicitly flush the nest mmu? */
+       bool nmmu_flush;
+};
+
 /* Maximum number of nvlinks per npu */
 #define NV_MAX_LINKS 6
 
@@ -477,7 +496,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
        int i, j;
        struct npu *npu;
        struct pci_dev *npdev;
-       struct pnv_phb *nphb;
 
        for (i = 0; i <= max_npu2_index; i++) {
                mmio_atsd_reg[i].reg = -1;
@@ -492,8 +510,7 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
                        if (!npdev)
                                continue;
 
-                       nphb = pci_bus_to_host(npdev->bus)->private_data;
-                       npu = &nphb->npu;
+                       npu = pci_bus_to_host(npdev->bus)->npu;
                        mmio_atsd_reg[i].npu = npu;
                        mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
                        while (mmio_atsd_reg[i].reg < 0) {
@@ -661,6 +678,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
        struct pnv_phb *nphb;
        struct npu *npu;
        struct npu_context *npu_context;
+       struct pci_controller *hose;
 
        /*
         * At present we don't support GPUs connected to multiple NPUs and I'm
@@ -688,8 +706,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                return ERR_PTR(-EINVAL);
        }
 
-       nphb = pci_bus_to_host(npdev->bus)->private_data;
-       npu = &nphb->npu;
+       hose = pci_bus_to_host(npdev->bus);
+       nphb = hose->private_data;
+       npu = hose->npu;
 
        /*
         * Setup the NPU context table for a particular GPU. These need to be
@@ -763,7 +782,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
         */
        WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
 
-       if (!nphb->npu.nmmu_flush) {
+       if (!npu->nmmu_flush) {
                /*
                 * If we're not explicitly flushing ourselves we need to mark
                 * the thread for global flushes
@@ -801,6 +820,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
        struct device_node *nvlink_dn;
        u32 nvlink_index;
+       struct pci_controller *hose;
 
        if (WARN_ON(!npdev))
                return;
@@ -808,8 +828,9 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;
 
-       nphb = pci_bus_to_host(npdev->bus)->private_data;
-       npu = &nphb->npu;
+       hose = pci_bus_to_host(npdev->bus);
+       nphb = hose->private_data;
+       npu = hose->npu;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
@@ -887,9 +908,15 @@ int pnv_npu2_init(struct pnv_phb *phb)
        struct pci_dev *gpdev;
        static int npu_index;
        uint64_t rc = 0;
+       struct pci_controller *hose = phb->hose;
+       struct npu *npu;
+       int ret;
+
+       npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+       if (!npu)
+               return -ENOMEM;
 
-       phb->npu.nmmu_flush =
-               of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
+       npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
        for_each_child_of_node(phb->hose->dn, dn) {
                gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
                if (gpdev) {
@@ -903,18 +930,29 @@ int pnv_npu2_init(struct pnv_phb *phb)
                }
        }
 
-       for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+       for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
                                                        i, &mmio_atsd); i++)
-               phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+               npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
 
-       pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
-       phb->npu.mmio_atsd_count = i;
-       phb->npu.mmio_atsd_usage = 0;
+       pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+       npu->mmio_atsd_count = i;
+       npu->mmio_atsd_usage = 0;
        npu_index++;
-       if (WARN_ON(npu_index >= NV_MAX_NPUS))
-               return -ENOSPC;
+       if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+               ret = -ENOSPC;
+               goto fail_exit;
+       }
        max_npu2_index = npu_index;
-       phb->npu.index = npu_index;
+       npu->index = npu_index;
+       hose->npu = npu;
 
        return 0;
+
+fail_exit:
+       for (i = 0; i < npu->mmio_atsd_count; ++i)
+               iounmap(npu->mmio_atsd_regs[i]);
+
+       kfree(npu);
+
+       return ret;
 }
index c39def4e1a4254c2a55df55586084cca9210a695..8e3149bbd4e20f529fb2aefeb59b5c71325ae103 100644
@@ -1278,7 +1278,7 @@ static void pnv_pci_ioda_setup_PEs(void)
                        pnv_ioda_reserve_pe(phb, 0);
                        pnv_ioda_setup_npu_PEs(hose->bus);
                        if (phb->model == PNV_PHB_MODEL_NPU2)
-                               pnv_npu2_init(phb);
+                               WARN_ON_ONCE(pnv_npu2_init(phb));
                }
                if (phb->type == PNV_PHB_NPU_OCAPI) {
                        bus = hose->bus;
index 25dff6b59100b8b6bb2f694f8ec53bcf2231ced0..771938840eff91bcec01f5ca635db3d20570246b 100644
@@ -8,9 +8,6 @@
 
 struct pci_dn;
 
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-
 enum pnv_phb_type {
        PNV_PHB_IODA1           = 0,
        PNV_PHB_IODA2           = 1,
@@ -174,19 +171,6 @@ struct pnv_phb {
        unsigned int            diag_data_size;
        u8                      *diag_data;
 
-       /* Nvlink2 data */
-       struct npu {
-               int index;
-               __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
-               unsigned int mmio_atsd_count;
-
-               /* Bitmask for MMIO register usage */
-               unsigned long mmio_atsd_usage;
-
-               /* Do we need to explicitly flush the nest mmu? */
-               bool nmmu_flush;
-       } npu;
-
        int p2p_target_count;
 };