net/mlx5: Configure cache line size for start and end padding
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d4691ea497aa7ea0666c83e330c078bb..c4242a4e81309f0d90a0cae8bdfc09fd39da5649 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
+               .mr_cache[16]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[17]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[18]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[19]   = {
+                       .size   = 4,
+                       .limit  = 2
+               },
+               .mr_cache[20]   = {
+                       .size   = 4,
+                       .limit  = 2
+               },
        },
 };
 
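Aside (not part of the patch): a minimal stand-alone sketch of how the designated initializers above populate the new MR cache buckets 16-20, assuming mr_cache is a plain size/limit pair array as the profile initializer suggests; the mock names below are illustrative only.

/* Illustrative sketch only -- mirrors the profile layout implied by the
 * initializers above; the real definitions live in the mlx5 core driver. */
struct mock_cache_ent {
	int size;	/* assumption: target number of cached entries in this bucket */
	int limit;	/* assumption: low-water mark before the bucket is refilled */
};

static const struct {
	struct mock_cache_ent mr_cache[21];	/* buckets 16-20 are the newly added ones */
} mock_profile = {
	.mr_cache[16] = { .size = 8, .limit = 4 },
	.mr_cache[17] = { .size = 8, .limit = 4 },
	.mr_cache[18] = { .size = 8, .limit = 4 },
	.mr_cache[19] = { .size = 4, .limit = 2 },
	.mr_cache[20] = { .size = 4, .limit = 2 },
};
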
@@ -398,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
 
        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
-               memcpy(dev->hca_caps_max[cap_type], hca_caps,
+               memcpy(dev->caps.hca_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
-               memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+               memcpy(dev->caps.hca_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
@@ -493,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
-       memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+       memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));
 
        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -517,8 +537,18 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+       /* If the HCA supports 4K UARs use it */
+       if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+               MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
+
        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
 
+       if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
+               MLX5_SET(cmd_hca_cap,
+                        set_hca_cap,
+                        cache_line_128byte,
+                        cache_line_size() == 128 ? 1 : 0);
+
        err = set_caps(dev, set_ctx, set_sz,
                       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
 
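Aside (not part of the patch): the two new capability writes above share one guard pattern, so here is a condensed sketch of just that pattern, using only macros and fields that already appear in this hunk; the helper name is hypothetical.

/* Illustrative sketch only: request a feature in the current HCA caps
 * only when the firmware advertises it in the maximum caps. uar_4k is
 * enabled whenever supported; cache_line_128byte is additionally keyed
 * off the host cache line size. */
static void sketch_set_optional_caps(struct mlx5_core_dev *dev, void *set_hca_cap)
{
	if (MLX5_CAP_GEN_MAX(dev, uar_4k))
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap, set_hca_cap, cache_line_128byte,
			 cache_line_size() == 128 ? 1 : 0);
}
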
@@ -739,7 +769,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        name, &dev->priv.uuari.uars[0]);
+                                        name, MLX5_EQ_TYPE_COMP);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -807,7 +837,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
                return 0;
        }
 
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 
@@ -899,8 +929,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
                goto out;
        }
 
-       MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
        err = mlx5_init_cq_table(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize cq table\n");
@@ -1079,8 +1107,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_cleanup_once;
        }
 
-       err = mlx5_alloc_uuars(dev, &priv->uuari);
-       if (err) {
+       dev->priv.uar = mlx5_get_uars_page(dev);
+       if (!dev->priv.uar) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
                goto err_disable_msix;
        }
@@ -1088,7 +1116,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        err = mlx5_start_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
-               goto err_free_uar;
+               goto err_put_uars;
        }
 
        err = alloc_comp_eqs(dev);
@@ -1154,8 +1182,8 @@ err_affinity_hints:
 err_stop_eqs:
        mlx5_stop_eqs(dev);
 
-err_free_uar:
-       mlx5_free_uuars(dev, &priv->uuari);
+err_put_uars:
+       mlx5_put_uars_page(dev, priv->uar);
 
 err_disable_msix:
        mlx5_disable_msix(dev);
@@ -1218,7 +1246,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
-       mlx5_free_uuars(dev, &priv->uuari);
+       mlx5_put_uars_page(dev, priv->uar);
        mlx5_disable_msix(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
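Aside (not part of the patch): a condensed view of the new pairing. Every successful mlx5_get_uars_page() in mlx5_load_one() is now balanced by mlx5_put_uars_page(), both on the error path above (err_put_uars) and in mlx5_unload_one(). A sketch, assuming the NULL-on-failure convention visible in the hunks; helper names and the -ENOMEM stand-in are illustrative.

/* Illustrative sketch only: the acquire/release discipline this patch
 * switches to, replacing the old mlx5_alloc_uuars()/mlx5_free_uuars(). */
static int sketch_uars_acquire(struct mlx5_core_dev *dev)
{
	dev->priv.uar = mlx5_get_uars_page(dev);
	if (!dev->priv.uar)	/* NULL: the shared UARs page could not be obtained */
		return -ENOMEM;	/* assumption: a suitable errno for the caller */
	return 0;
}

static void sketch_uars_release(struct mlx5_core_dev *dev)
{
	mlx5_put_uars_page(dev, dev->priv.uar);
}
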
@@ -1284,10 +1312,24 @@ static int init_one(struct pci_dev *pdev,
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->pci_status_mutex);
        mutex_init(&dev->intf_state_mutex);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       err = init_srcu_struct(&priv->pfault_srcu);
+       if (err) {
+               dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
+                       err);
+               goto clean_dev;
+       }
+#endif
+       mutex_init(&priv->bfregs.reg_head.lock);
+       mutex_init(&priv->bfregs.wc_head.lock);
+       INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
+       INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
+
        err = mlx5_pci_init(dev, priv);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
-               goto clean_dev;
+               goto clean_srcu;
        }
 
        err = mlx5_health_init(dev);
@@ -1304,9 +1346,7 @@ static int init_one(struct pci_dev *pdev,
                goto clean_health;
        }
 
-       err = request_module_nowait(MLX5_IB_MOD);
-       if (err)
-               pr_info("failed request module on %s\n", MLX5_IB_MOD);
+       request_module_nowait(MLX5_IB_MOD);
 
        err = devlink_register(devlink, &pdev->dev);
        if (err)
@@ -1321,7 +1361,11 @@ clean_health:
        mlx5_health_cleanup(dev);
 close_pci:
        mlx5_pci_close(dev, priv);
+clean_srcu:
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       cleanup_srcu_struct(&priv->pfault_srcu);
 clean_dev:
+#endif
        pci_set_drvdata(pdev, NULL);
        devlink_free(devlink);
 
@@ -1346,6 +1390,9 @@ static void remove_one(struct pci_dev *pdev)
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
        mlx5_pci_close(dev, priv);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       cleanup_srcu_struct(&priv->pfault_srcu);
+#endif
        pci_set_drvdata(pdev, NULL);
        devlink_free(devlink);
 }
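
Aside (not part of the patch): the ODP-related SRCU handling added to init_one()/remove_one() follows the standard init_srcu_struct()/cleanup_srcu_struct() pairing and is compiled only under CONFIG_INFINIBAND_ON_DEMAND_PAGING. A stripped-down sketch of that ordering; the helper names are illustrative.

/* Illustrative sketch only: pfault_srcu is set up before mlx5_pci_init()
 * and torn down after mlx5_pci_close(), mirroring the error unwinding in
 * init_one() (close_pci -> clean_srcu -> clean_dev). */
static int sketch_pfault_srcu_init(struct mlx5_priv *priv)
{
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	return init_srcu_struct(&priv->pfault_srcu);
#else
	return 0;
#endif
}

static void sketch_pfault_srcu_cleanup(struct mlx5_priv *priv)
{
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	cleanup_srcu_struct(&priv->pfault_srcu);
#endif
}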