net/mlx5: Only cancel recovery work when cleaning up device
drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7b4c339a8a9a233edd0be5b4dd04a09b975810fa..d01e9f21d4691ea497aa7ea0666c83e330c078bb 100644
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(dev, 128));
 
+       /* Check log_max_qp from HCA caps to set in current profile */
+       if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+               mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+                              profile[prof_sel].log_max_qp,
+                              MLX5_CAP_GEN_MAX(dev, log_max_qp));
+               profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+       }
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);
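
The first hunk caps the selected profile's log_max_qp at the limit the HCA reports via MLX5_CAP_GEN_MAX(), so the firmware is never asked for a larger QP table than it supports; since prof points at the same profile[prof_sel] entry, the MLX5_SET() that follows picks up the clamped value. A minimal userspace sketch of the same clamp-and-warn pattern (struct profile, clamp_log_max_qp() and hca_log_max_qp are illustrative stand-ins, not driver structures):

```c
#include <stdio.h>

struct profile {
	int log_max_qp;
};

static void clamp_log_max_qp(struct profile *prof, int hca_log_max_qp)
{
	if (hca_log_max_qp < prof->log_max_qp) {
		fprintf(stderr,
			"log_max_qp %d exceeds HCA limit, clamping to %d\n",
			prof->log_max_qp, hca_log_max_qp);
		prof->log_max_qp = hca_log_max_qp;
	}
}

int main(void)
{
	struct profile prof = { .log_max_qp = 18 };

	clamp_log_max_qp(&prof, 17);	/* warns, clamps to 17 */
	clamp_log_max_qp(&prof, 24);	/* within the limit, no change */
	printf("effective log_max_qp = %d\n", prof.log_max_qp);
	return 0;
}
```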
@@ -557,7 +564,7 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
-cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
 {
        u32 timer_h, timer_h1, timer_l;
 
@@ -567,7 +574,7 @@ cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
        if (timer_h != timer_h1) /* wrap around */
                timer_l = ioread32be(&dev->iseg->internal_timer_l);
 
-       return (cycle_t)timer_l | (cycle_t)timer_h1 << 32;
+       return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
 static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
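
The two hunks above replace cycle_t with plain u64; cycle_t was a typedef for u64 in the timekeeping core and was being phased out around this time, so this is purely a type rename. The read logic itself is the classic wrap-safe scheme for a 64-bit counter exposed as two 32-bit registers: read high, then low, then high again, and if the high word changed in between, the low word wrapped and must be re-read. A self-contained sketch of that scheme (read_hi()/read_lo() simulate the two ioread32be() accesses; the static counter value is made up):

```c
#include <stdint.h>
#include <stdio.h>

/* Simulated 64-bit hardware counter, split into two 32-bit halves the
 * way the mlx5 internal timer is. Real hardware ticks asynchronously;
 * a static value is enough to show the read logic. */
static uint64_t sim_counter = 0x00000001FFFFFFFFull;

static uint32_t read_hi(void) { return sim_counter >> 32; }
static uint32_t read_lo(void) { return (uint32_t)sim_counter; }

static uint64_t read_counter64(void)
{
	uint32_t hi  = read_hi();
	uint32_t lo  = read_lo();
	uint32_t hi2 = read_hi();

	/* High word changed: the low word wrapped between the two reads,
	 * so re-read it to pair it with the newer high-word sample. */
	if (hi != hi2)
		lo = read_lo();

	return (uint64_t)lo | ((uint64_t)hi2 << 32);
}

int main(void)
{
	printf("counter = 0x%016llx\n",
	       (unsigned long long)read_counter64());
	return 0;
}
```

Pairing the re-read low word with the second high-word sample is what keeps the composed value consistent even when the wrap lands mid-read.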
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int numa_node           = priv->numa_node;
        int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
                return -ENOMEM;
        }
 
-       cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
        err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
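
These two hunks drop the single-use numa_node local and read priv->numa_node at the call site; behavior is unchanged. cpumask_local_spread(i, node) picks the i-th CPU while preferring CPUs local to the given NUMA node before falling back to remote ones, and irq_set_affinity_hint() then suggests that CPU for the completion vector's interrupt, keeping EQ processing close to its memory. A rough userspace sketch of that selection policy (on_node[] and NR_CPUS describe a made-up topology, not the real API):

```c
#include <stdio.h>

#define NR_CPUS 8

/* 1 if the CPU sits on the preferred NUMA node (made-up topology) */
static const int on_node[NR_CPUS] = { 0, 0, 1, 1, 0, 0, 1, 1 };

static int local_spread(int i, int ncpus)
{
	int pass, cpu, seen = 0;

	i %= ncpus;
	/* pass 0 walks node-local CPUs, pass 1 the remote ones */
	for (pass = 0; pass < 2; pass++)
		for (cpu = 0; cpu < ncpus; cpu++)
			if (on_node[cpu] == !pass && seen++ == i)
				return cpu;
	return 0;	/* not reached for valid input */
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		printf("vector %d -> cpu %d\n", i, local_spread(i, NR_CPUS));
	return 0;
}
```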
@@ -1189,6 +1195,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
        int err = 0;
 
+       if (cleanup)
+               mlx5_drain_health_wq(dev);
+
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
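
This hunk is the change the subject line describes: when mlx5_unload_one() runs with cleanup set, i.e. on final device removal, the health workqueue is drained before teardown begins, so a queued recovery work item cannot fire against a half-destroyed device. On the non-cleanup paths the drain is skipped; the recovery flow itself runs from that workqueue, so draining it unconditionally from there would deadlock. A pthread-based sketch of the ordering (do_recovery(), drain_health_work() and unload_device() are illustrative stand-ins, not driver code):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_t health_worker;
static bool worker_started;

static void *do_recovery(void *arg)
{
	(void)arg;
	puts("recovery work running");	/* would touch device state */
	return NULL;
}

static void drain_health_work(void)
{
	/* Stands in for mlx5_drain_health_wq(): wait until any in-flight
	 * recovery work has finished before tearing anything down. */
	if (worker_started)
		pthread_join(health_worker, NULL);
	worker_started = false;
}

static void unload_device(bool cleanup)
{
	if (cleanup)
		drain_health_work();	/* only on final device removal */
	puts("tearing down device state");
}

int main(void)
{
	worker_started = !pthread_create(&health_worker, NULL,
					 do_recovery, NULL);
	unload_device(true);
	return 0;
}
```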
@@ -1351,7 +1360,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv, false);
-       /* In case of kernel call save the pci state and drain health wq */
+       /* In case of kernel call save the pci state and drain the health wq */
        if (state) {
                pci_save_state(pdev);
                mlx5_drain_health_wq(dev);
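
The final hunk only tightens the comment's grammar. For context, the state argument distinguishes how the handler was entered: the comment's "kernel call" is the PCI error-recovery core passing a non-zero channel state, while a zero state appears to mark driver-internal invocations, which skip saving the PCI state and draining the health workqueue. A stub sketch of that gating (err_detected() is a stand-in, not the real handler):

```c
#include <stdio.h>

static void err_detected(int state)
{
	puts("enter error state, unload device");
	if (state) {	/* non-zero: invoked by the PCI error-recovery core */
		puts("save pci state");
		puts("drain health workqueue");
	}
}

int main(void)
{
	err_detected(0);	/* driver-internal call: skip save/drain */
	err_detected(2);	/* AER call, e.g. a frozen channel: full path */
	return 0;
}
```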