Merge branches 'arm/rockchip', 'arm/exynos', 'arm/smmu', 'x86/vt-d', 'x86/amd', ...
[sfrench/cifs-2.6.git] / drivers / net / ethernet / emulex / benet / be_cmds.c
index 7f05f309e93596778851fb280fa4c8bd068828c3..c5e1d0ac75f909f843dd0397ad41b85eeb26a164 100644 (file)
@@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter)
        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
-                       break;
-
-               msleep(1000);
-       }
-
-       if (i == SLIPORT_READY_TIMEOUT)
-               return sliport_status ? : -1;
-
-       return 0;
-}
-
-static bool lancer_provisioning_error(struct be_adapter *adapter)
-{
-       u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
-
-       sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
-       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
-               sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
-
-               if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
-                   sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
-                       return true;
-       }
-       return false;
-}
-
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
-       int status;
-       u32 sliport_status, err, reset_needed;
-       bool resource_error;
+                       return 0;
 
-       resource_error = lancer_provisioning_error(adapter);
-       if (resource_error)
-               return -EAGAIN;
+               if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
+                   !(sliport_status & SLIPORT_STATUS_RN_MASK))
+                       return -EIO;
 
-       status = lancer_wait_ready(adapter);
-       if (!status) {
-               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
-               err = sliport_status & SLIPORT_STATUS_ERR_MASK;
-               reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
-               if (err && reset_needed) {
-                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
-                                 adapter->db + SLIPORT_CONTROL_OFFSET);
-
-                       /* check if adapter has corrected the error */
-                       status = lancer_wait_ready(adapter);
-                       sliport_status = ioread32(adapter->db +
-                                                 SLIPORT_STATUS_OFFSET);
-                       sliport_status &= (SLIPORT_STATUS_ERR_MASK |
-                                               SLIPORT_STATUS_RN_MASK);
-                       if (status || sliport_status)
-                               status = -1;
-               } else if (err || reset_needed) {
-                       status = -1;
-               }
+               msleep(1000);
        }
-       /* Stop error recovery if error is not recoverable.
-        * No resource error is temporary errors and will go away
-        * when PF provisions resources.
-        */
-       resource_error = lancer_provisioning_error(adapter);
-       if (resource_error)
-               status = -EAGAIN;
 
-       return status;
+       return sliport_status ? : -1;
 }
 
 int be_fw_wait_ready(struct be_adapter *adapter)
@@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter)
        }
 
        do {
+               /* There's no means to poll POST state on BE2/3 VFs */
+               if (BEx_chip(adapter) && be_virtfn(adapter))
+                       return 0;
+
                stage = be_POST_stage_get(adapter);
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
@@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 
 err:
        dev_err(dev, "POST timeout; stage=%#x\n", stage);
-       return -1;
+       return -ETIMEDOUT;
 }
 
 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
@@ -1773,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
        total_size = buf_len;
 
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-       get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                             get_fat_cmd.size,
-                                             &get_fat_cmd.dma);
+       get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            get_fat_cmd.size,
+                                            &get_fat_cmd.dma, GFP_ATOMIC);
        if (!get_fat_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while reading FAT data\n");
@@ -1820,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
                log_offset += buf_size;
        }
 err:
-       pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                           get_fat_cmd.va, get_fat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+                         get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -2123,16 +2070,12 @@ int be_cmd_reset_function(struct be_adapter *adapter)
        int status;
 
        if (lancer_chip(adapter)) {
+               iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                         adapter->db + SLIPORT_CONTROL_OFFSET);
                status = lancer_wait_ready(adapter);
-               if (!status) {
-                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
-                                 adapter->db + SLIPORT_CONTROL_OFFSET);
-                       status = lancer_test_and_set_rdy_state(adapter);
-               }
-               if (status) {
+               if (status)
                        dev_err(&adapter->pdev->dev,
                                "Adapter in non recoverable error\n");
-               }
                return status;
        }
 
@@ -2272,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
                return -EINVAL;
 
        cmd.size = sizeof(struct be_cmd_resp_port_type);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
                return -ENOMEM;
        }
-       memset(cmd.va, 0, cmd.size);
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -2302,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
        }
 err:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -2777,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -2811,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2862,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                             &attribs_cmd.dma);
+       attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            attribs_cmd.size,
+                                            &attribs_cmd.dma, GFP_ATOMIC);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -2890,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
-               pci_free_consistent(adapter->pdev, attribs_cmd.size,
-                                   attribs_cmd.va, attribs_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+                                 attribs_cmd.va, attribs_cmd.dma);
        return status;
 }
 
@@ -3029,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-       get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                                  get_mac_list_cmd.size,
-                                                  &get_mac_list_cmd.dma);
+       get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                                 get_mac_list_cmd.size,
+                                                 &get_mac_list_cmd.dma,
+                                                 GFP_ATOMIC);
 
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -3075,7 +3021,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
-                * or one or more true or pseudo permanant mac addresses.
+                * or one or more true or pseudo permanent mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
@@ -3104,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 out:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+                         get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
 }
 
@@ -3130,7 +3076,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
        int status;
        bool pmac_valid = false;
 
-       memset(mac, 0, ETH_ALEN);
+       eth_zero_addr(mac);
 
        if (BEx_chip(adapter)) {
                if (be_physfn(adapter))
@@ -3158,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                                   &cmd.dma, GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3348,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -3383,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 
 }
@@ -3397,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
        if (!extfat_cmd.va)
                return -ENOMEM;
 
@@ -3420,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
 err:
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
        return status;
 }
 
@@ -3434,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
 
        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3453,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
 err:
        return level;
 }
@@ -3631,12 +3581,12 @@ static void be_copy_nic_desc(struct be_resources *res,
        res->max_rss_qs = le16_to_cpu(desc->rssq_count);
        res->max_rx_qs = le16_to_cpu(desc->rq_count);
        res->max_evt_qs = le16_to_cpu(desc->eq_count);
+       res->max_cq_count = le16_to_cpu(desc->cq_count);
+       res->max_iface_count = le16_to_cpu(desc->iface_count);
+       res->max_mcc_count = le16_to_cpu(desc->mcc_count);
        /* Clear flags that driver is not interested in */
        res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
                                BE_IF_CAP_FLAGS_WANT;
-       /* Need 1 RXQ as the default RXQ */
-       if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
-               res->max_rss_qs -= 1;
 }
 
 /* Uses Mbox */
@@ -3652,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -3692,13 +3643,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
 /* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
-                             struct be_resources *res, u8 domain)
+                             struct be_resources *res, u8 query, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
        struct be_cmd_req_get_profile_config *req;
@@ -3708,12 +3660,13 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        struct be_nic_res_desc *nic;
        struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
-       u32 desc_count;
+       u16 desc_count;
        int status;
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3727,12 +3680,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
 
+       /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
+        * descriptors with all bits set to "1" for the fields which can be
+        * modified using SET_PROFILE_CONFIG cmd.
+        */
+       if (query == RESOURCE_MODIFIABLE)
+               req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
+
        status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
        resp = cmd.va;
-       desc_count = le32_to_cpu(resp->desc_count);
+       desc_count = le16_to_cpu(resp->desc_count);
 
        pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
                                desc_count);
@@ -3752,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3767,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3783,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
        status = be_cmd_notify_wait(adapter, &wrb);
 
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3857,23 +3820,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                                         1, version, domain);
 }
 
+static void be_fill_vf_res_template(struct be_adapter *adapter,
+                                   struct be_resources pool_res,
+                                   u16 num_vfs, u16 num_vf_qs,
+                                   struct be_nic_res_desc *nic_vft)
+{
+       u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
+       struct be_resources res_mod = {0};
+
+       /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+        * which are modifiable using SET_PROFILE_CONFIG cmd.
+        */
+       be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
+
+       /* If RSS IFACE capability flags are modifiable for a VF, set the
+        * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+        * more than 1 RSSQ is available for a VF.
+        * Otherwise, provision only 1 queue pair for VF.
+        */
+       if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+               if (num_vf_qs > 1) {
+                       vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+                       if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+                               vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+               } else {
+                       vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+                                            BE_IF_FLAGS_DEFQ_RSS);
+               }
+
+               nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
+       } else {
+               num_vf_qs = 1;
+       }
+
+       nic_vft->rq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->txq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
+                                       (num_vfs + 1));
+
+       /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+        * among the PF and its VFs, if the fields are changeable
+        */
+       if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+               nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
+                                                        (num_vfs + 1));
+
+       if (res_mod.max_vlans == FIELD_MODIFIABLE)
+               nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
+                                                 (num_vfs + 1));
+
+       if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+               nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
+                                                  (num_vfs + 1));
+
+       if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+               nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
+                                                (num_vfs + 1));
+}
+
 int be_cmd_set_sriov_config(struct be_adapter *adapter,
-                           struct be_resources res, u16 num_vfs)
+                           struct be_resources pool_res, u16 num_vfs,
+                           u16 num_vf_qs)
 {
        struct {
                struct be_pcie_res_desc pcie;
                struct be_nic_res_desc nic_vft;
        } __packed desc;
-       u16 vf_q_count;
-
-       if (BEx_chip(adapter) || lancer_chip(adapter))
-               return 0;
 
        /* PF PCIE descriptor */
        be_reset_pcie_desc(&desc.pcie);
        desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
        desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.pcie.pf_num = adapter->pdev->devfn;
        desc.pcie.sriov_state = num_vfs ? 1 : 0;
        desc.pcie.num_vfs = cpu_to_le16(num_vfs);
@@ -3882,32 +3902,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
        be_reset_nic_desc(&desc.nic_vft);
        desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
        desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
-                               (1 << NOSV_SHIFT);
+       desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.nic_vft.pf_num = adapter->pdev->devfn;
        desc.nic_vft.vf_num = 0;
 
-       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
-               /* If number of VFs requested is 8 less than max supported,
-                * assign 8 queue pairs to the PF and divide the remaining
-                * resources evenly among the VFs
-                */
-               if (num_vfs < (be_max_vfs(adapter) - 8))
-                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
-               else
-                       vf_q_count = res.max_rss_qs / num_vfs;
-
-               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
-               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
-       } else {
-               desc.nic_vft.txq_count = cpu_to_le16(1);
-               desc.nic_vft.rq_count = cpu_to_le16(1);
-               desc.nic_vft.rssq_count = cpu_to_le16(0);
-               /* One CQ for each TX, RX and MCCQ */
-               desc.nic_vft.cq_count = cpu_to_le16(3);
-       }
+       be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
+                               &desc.nic_vft);
 
        return be_cmd_set_profile_config(adapter, &desc,
                                         2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);