Merge tag 'driver-core-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 059db06104450504c7c283ccec1b908a5454644f..51100350b688058dc782306a57fa81abd907612f 100644
@@ -67,6 +67,7 @@
 #include <rdma/uverbs_std_types.h>
 #include <rdma/mlx5_user_ioctl_verbs.h>
 #include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem_odp.h>
 
 #define UVERBS_MODULE_NAME mlx5_ib
 #include <rdma/uverbs_named_ioctl.h>
@@ -693,21 +694,6 @@ static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
        get_atomic_caps(dev, atomic_size_qp, props);
 }
 
-static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
-                              struct ib_device_attr *props)
-{
-       u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
-
-       get_atomic_caps(dev, atomic_size_qp, props);
-}
-
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
-{
-       struct ib_device_attr props = {};
-
-       get_atomic_caps_dc(dev, &props);
-       return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
-}
 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
 {
@@ -844,8 +830,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;
-       else
-               resp.response_length = resp_len;
+
+       resp.response_length = resp_len;
 
        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;
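
Note: the dropped else was dead weight, since the if above returns on error; resp.response_length can be assigned unconditionally. For context, this base length then grows as optional capability blocks are appended later in mlx5_ib_query_device(), each gated on whether the caller's output buffer can hold the field. A sketch of that extensible-uverbs pattern, using a field_avail()-style helper as the driver did around this time (exact macro shape worth verifying):

    /* sketch: append an optional response block only when the user
     * buffer (uhw->outlen) is large enough to receive it */
    #define field_avail(type, fld, sz) \
            (offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

    if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
            /* ... fill resp.tso_caps from HCA caps ... */
            resp.response_length += sizeof(resp.tso_caps);
    }
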
@@ -1011,6 +997,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        props->max_pi_fast_reg_page_list_len =
                props->max_fast_reg_page_list_len / 2;
+       props->max_sgl_rd =
+               MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
        get_atomic_caps_qp(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
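
Note: max_sgl_rd is a new ib_device_attr field in this cycle. It advertises the largest scatter/gather list an RDMA READ can use while staying on the device's optimized path, sourced here from the max_sgl_for_optimized_performance HCA capability. A hypothetical ULP-side consumer (function name illustrative):

    /* sketch: prefer the performance-optimal READ SGL depth when the
     * device reports one; fall back to the hard max_sge_rd limit */
    static u32 pick_read_sgl_depth(const struct ib_device *ibdev)
    {
            const struct ib_device_attr *attr = &ibdev->attrs;

            return attr->max_sgl_rd ? attr->max_sgl_rd : attr->max_sge_rd;
    }
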
@@ -1031,7 +1019,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
 
-       if (!mlx5_core_is_pf(mdev))
+       if (mlx5_core_is_vf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
 
        if (mlx5_ib_port_link_layer(ibdev, 1) ==
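
Note: swapping !mlx5_core_is_pf() for mlx5_core_is_vf() is more than style. With embedded-CPU functions (ECPF, as on BlueField) a function can be neither a classic PF nor a VF, so "not a PF" would wrongly advertise IB_DEVICE_VIRTUAL_FUNCTION there. The helper is, to the best of my recollection of this era's include/linux/mlx5/driver.h, an explicit type check:

    /* sketch: explicit VF test instead of "not a PF" */
    static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
    {
            return dev->coredev_type == MLX5_COREDEV_VF;
    }
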
@@ -1161,8 +1149,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
                                MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
-                       resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
-                               MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+                       if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+                               resp.striding_rq_caps
+                                       .min_single_wqe_log_num_of_strides =
+                                       MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+                       else
+                               resp.striding_rq_caps
+                                       .min_single_wqe_log_num_of_strides =
+                                       MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
                                MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.supported_qpts =
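
Note: firmware exposing ext_stride_num_range accepts far fewer strides per striding-RQ WQE, so the minimum reported to userspace drops (log2 floors of 3 vs. 9 in this era's headers, by my reading). A hypothetical validity check a consumer of these caps might apply:

    /* sketch: log2(strides per WQE) must fall inside the advertised range */
    static bool log_num_strides_ok(const struct mlx5_ib_query_device_resp *resp,
                                   u8 log_num_strides)
    {
            return log_num_strides >=
                           resp->striding_rq_caps.min_single_wqe_log_num_of_strides &&
                   log_num_strides <=
                           resp->striding_rq_caps.max_single_wqe_log_num_of_strides;
    }
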
@@ -1808,7 +1802,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
                return -EINVAL;
 
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-       if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+       if (dev->wc_support)
                resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = cache_line_size();
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
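
Note: BlueFlame reporting becomes evidence-based here. Instead of inferring write-combining support from "PF with the bf capability", bf_reg_size is advertised only when dev->wc_support was set by the runtime WC probe run from mlx5_ib_enable_driver() (added near the end of this patch). As I read the series, this stops VFs and platforms with non-functional WC mappings from advertising a doorbell fast path they cannot deliver.
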
@@ -2168,7 +2162,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
        mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
        err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
-                               prot);
+                               prot, NULL);
        if (err) {
                mlx5_ib_err(dev,
                            "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2210,7 +2204,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
              PAGE_SHIFT) +
              page_idx;
        return rdma_user_mmap_io(context, vma, pfn, map_size,
-                                pgprot_writecombine(vma->vm_page_prot));
+                                pgprot_writecombine(vma->vm_page_prot),
+                                NULL);
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,7 +2243,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
                        PAGE_SHIFT;
                return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
                                         PAGE_SIZE,
-                                        pgprot_noncached(vma->vm_page_prot));
+                                        pgprot_noncached(vma->vm_page_prot),
+                                        NULL);
        case MLX5_IB_MMAP_CLOCK_INFO:
                return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
@@ -5145,8 +5141,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
-       if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
-               immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
        return 0;
 }
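
Note: max_mad_size is now reported unconditionally. Whether MAD services are actually reachable is already encoded in the core_cap_flags computed just above, so the extra link-layer/RoCE test added no information; presumably this also keeps the value coherent for the RoCE-disabled flow introduced at the end of this patch.
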
@@ -5249,11 +5244,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 {
        int err;
 
-       if (MLX5_CAP_GEN(dev->mdev, roce)) {
-               err = mlx5_nic_vport_enable_roce(dev->mdev);
-               if (err)
-                       return err;
-       }
+       err = mlx5_nic_vport_enable_roce(dev->mdev);
+       if (err)
+               return err;
 
        err = mlx5_eth_lag_init(dev);
        if (err)
@@ -5262,8 +5255,7 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
        return 0;
 
 err_disable_roce:
-       if (MLX5_CAP_GEN(dev->mdev, roce))
-               mlx5_nic_vport_disable_roce(dev->mdev);
+       mlx5_nic_vport_disable_roce(dev->mdev);
 
        return err;
 }
@@ -5271,8 +5263,7 @@ err_disable_roce:
 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
        mlx5_eth_lag_cleanup(dev);
-       if (MLX5_CAP_GEN(dev->mdev, roce))
-               mlx5_nic_vport_disable_roce(dev->mdev);
+       mlx5_nic_vport_disable_roce(dev->mdev);
 }
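
Note: the MLX5_CAP_GEN(mdev, roce) guards disappear from both paths because, after this patch, devices that must not run RoCE never reach this code: mlx5_ib_add() steers them to raw_eth_profile (final hunk), whose ROCE stage only does the common netdev wiring and never calls mlx5_enable_eth(). Keeping the checks here would only have masked profile-selection bugs.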
 
 struct mlx5_ib_counter {
@@ -6113,11 +6104,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
        mlx5_ib_cleanup_multiport_master(dev);
-       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               srcu_barrier(&dev->mr_srcu);
-               cleanup_srcu_struct(&dev->mr_srcu);
-       }
+       WARN_ON(!xa_empty(&dev->odp_mkeys));
+       cleanup_srcu_struct(&dev->odp_srcu);
 
+       WARN_ON(!xa_empty(&dev->sig_mrs));
        WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
 }
 
@@ -6169,15 +6159,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
        mutex_init(&dev->cap_mask_mutex);
        INIT_LIST_HEAD(&dev->qp_list);
        spin_lock_init(&dev->reset_flow_resource_lock);
+       xa_init(&dev->odp_mkeys);
+       xa_init(&dev->sig_mrs);
 
        spin_lock_init(&dev->dm.lock);
        dev->dm.dev = mdev;
 
-       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               err = init_srcu_struct(&dev->mr_srcu);
-               if (err)
-                       goto err_mp;
-       }
+       err = init_srcu_struct(&dev->odp_srcu);
+       if (err)
+               goto err_mp;
 
        return 0;
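
Note: two related changes meet in these init/cleanup hunks. The SRCU instance (renamed mr_srcu to odp_srcu to match its actual scope) is now initialized unconditionally rather than under CONFIG_INFINIBAND_ON_DEMAND_PAGING, and dedicated xarrays (odp_mkeys, sig_mrs) take over the bookkeeping, letting cleanup WARN if anything leaked. The read side this enables looks roughly like the pagefault path in odp.c of this series (sketch; details worth verifying):

    /* sketch: resolve an ODP mkey under SRCU; teardown synchronizes
     * odp_srcu before freeing, so a loaded pointer stays valid while
     * the read lock is held */
    static struct mlx5_core_mkey *odp_mkey_peek(struct mlx5_ib_dev *dev, u32 key)
    {
            struct mlx5_core_mkey *mmkey;
            int srcu_key;

            srcu_key = srcu_read_lock(&dev->odp_srcu);
            mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
            /* real callers take a reference before dropping the lock */
            srcu_read_unlock(&dev->odp_srcu, srcu_key);
            return mmkey;
    }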
 
@@ -6237,6 +6227,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
        .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
        .drain_rq = mlx5_ib_drain_rq,
        .drain_sq = mlx5_ib_drain_sq,
+       .enable_driver = mlx5_ib_enable_driver,
+       .fill_res_entry = mlx5_ib_fill_res_entry,
+       .fill_stat_entry = mlx5_ib_fill_stat_entry,
        .get_dev_fw_str = get_dev_fw_str,
        .get_dma_mr = mlx5_ib_get_dma_mr,
        .get_link_layer = mlx5_ib_port_link_layer,
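
Note: three optional core callbacks are wired up: enable_driver (implemented later in this patch; the core invokes it at the end of device registration, and a nonzero return aborts the register) plus the restrack fill hooks that let rdma res / rdma stat netlink dumps carry driver-specific attributes. Their shapes in this era's struct ib_device_ops, from memory (verify against include/rdma/ib_verbs.h):

    int (*enable_driver)(struct ib_device *dev);
    int (*fill_res_entry)(struct sk_buff *msg,
                          struct rdma_restrack_entry *entry);
    int (*fill_stat_entry)(struct sk_buff *msg,
                           struct rdma_restrack_entry *entry);
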
@@ -6283,6 +6276,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
 
 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
        .get_vf_config = mlx5_ib_get_vf_config,
+       .get_vf_guid = mlx5_ib_get_vf_guid,
        .get_vf_stats = mlx5_ib_get_vf_stats,
        .set_vf_guid = mlx5_ib_set_vf_guid,
        .set_vf_link_state = mlx5_ib_set_vf_link_state,
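
Note: get_vf_guid is the query-side counterpart of the long-standing set_vf_guid, added this cycle so VF node/port GUIDs can be dumped (e.g. through the ip link path). Its likely prototype, mirroring the other VF ops (hedged, from memory):

    int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
                            struct ifla_vf_guid *node_guid,
                            struct ifla_vf_guid *port_guid);
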
@@ -6412,7 +6406,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
        .query_port = mlx5_ib_rep_query_port,
 };
 
-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
 {
        ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
        return 0;
@@ -6452,7 +6446,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
        mlx5_remove_netdev_notifier(dev, port_num);
 }
 
-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
 {
        struct mlx5_core_dev *mdev = dev->mdev;
        enum rdma_link_layer ll;
@@ -6468,7 +6462,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
        return err;
 }
 
-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
 {
        mlx5_ib_stage_common_roce_cleanup(dev);
 }
@@ -6678,6 +6672,18 @@ static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
        }
 }
 
+int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+       struct mlx5_ib_dev *mdev = to_mdev(dev);
+       int ret;
+
+       ret = mlx5_ib_test_wc(mdev);
+       mlx5_ib_dbg(mdev, "Write-Combining %s",
+                   mdev->wc_support ? "supported" : "not supported");
+
+       return ret;
+}
+
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
                      const struct mlx5_ib_profile *profile,
                      int stage)
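
Note: the mlx5_ib_enable_driver() added above hosts the one-shot write-combining probe: mlx5_ib_test_wc() runs once per device from the registration path and latches the result in wc_support, which the ucontext hunk earlier consumes for bf_reg_size. The core-side caller, roughly as drivers/infiniband/core/device.c had it in this era (paraphrased):

    /* sketch of the call site near the end of ib_register_device() */
    if (device->ops.enable_driver) {
            ret = device->ops.enable_driver(device);
            if (ret)
                    goto out; /* a failing probe aborts registration */
    }
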
@@ -6775,7 +6781,7 @@ static const struct mlx5_ib_profile pf_profile = {
                     mlx5_ib_stage_delay_drop_cleanup),
 };
 
-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
        STAGE_CREATE(MLX5_IB_STAGE_INIT,
                     mlx5_ib_stage_init_init,
                     mlx5_ib_stage_init_cleanup),
@@ -6786,11 +6792,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
                     mlx5_ib_stage_caps_init,
                     NULL),
        STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
-                    mlx5_ib_stage_rep_non_default_cb,
+                    mlx5_ib_stage_raw_eth_non_default_cb,
                     NULL),
        STAGE_CREATE(MLX5_IB_STAGE_ROCE,
-                    mlx5_ib_stage_rep_roce_init,
-                    mlx5_ib_stage_rep_roce_cleanup),
+                    mlx5_ib_stage_raw_eth_roce_init,
+                    mlx5_ib_stage_raw_eth_roce_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_SRQ,
                     mlx5_init_srq_table,
                     mlx5_cleanup_srq_table),
@@ -6866,6 +6872,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
 
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
+       const struct mlx5_ib_profile *profile;
        enum rdma_link_layer ll;
        struct mlx5_ib_dev *dev;
        int port_type_cap;
@@ -6901,7 +6908,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->mdev = mdev;
        dev->num_ports = num_ports;
 
-       return __mlx5_ib_add(dev, &pf_profile);
+       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+               profile = &raw_eth_profile;
+       else
+               profile = &pf_profile;
+
+       return __mlx5_ib_add(dev, profile);
 }
 
 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
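
Note: this selection is the pivot of the whole patch. An Ethernet device whose RoCE is off (firmware capability missing, or disabled via the devlink enable_roce parameter that mlx5_is_roce_enabled() consults, as I read this series) now registers with raw_eth_profile, which until now served only switchdev uplink representors; hence the rep-to-raw_eth renames and the dropped RoCE capability guards earlier in the patch. Such a device still gets raw packet QPs and user verbs, just none of the RoCE machinery.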