/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
#include "lib/devcom.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static u32 sw_owner_id[4];
#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
static DEFINE_IDA(sw_vhca_ida);

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

#define LOG_MAX_SUPPORTED_QPS 0xff

static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= LOG_MAX_SUPPORTED_QPS,
		/* per-size .mr_cache[] entries follow here in the full source */
	},
};

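/* Poll the initialization segment's "initializing" bit until FW clears it,
 * printing a progress warning every warn_time_mili and giving up (or honoring
 * an MLX5_BREAK_FW_WAIT request) after max_wait_mili.
 */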
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	u32 fw_initializing;
	int err = 0;

	do {
		fw_initializing = ioread32be(&dev->iseg->initializing);
		if (!(fw_initializing >> 31))
			break;
		if (time_after(jiffies, end) ||
		    test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
				       jiffies_to_msecs(end - warn) / 1000, fw_initializing);
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
	} while (true);

	return err;
}

static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, KBUILD_MODNAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));

	snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
		 LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
		 LINUX_VERSION_SUBLEVEL);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec_in(dev, set_driver_version, in);
}

static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

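/* The PKEY table size is exchanged with FW as a log-style encoding:
 * 128 entries -> 0, 256 -> 1, ... 4096 -> 5, i.e. log2(size / 128).
 */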
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca[cap_type]->max, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}

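/* Each handle_hca_cap_*() below follows the same pattern: query the current
 * capability page into dev->caps, copy it into the shared SET_HCA_CAP input
 * buffer (set_ctx), adjust the fields this driver wants to change, and issue
 * SET_HCA_CAP with the matching op_mod.
 */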
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}

static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
	       MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field)                                            \
	do {                                                                   \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field);                       \
		if (_res) {                                                    \
			do_set = true;                                         \
			MLX5_SET(odp_cap, set_hca_cap, field, _res);           \
		}                                                              \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}

static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
						 &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
	return err;
}

static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
	if (err)
		return err;

	if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
	    !(dev->priv.sw_vhca_id > 0))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
	       MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
	MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);

	return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
}

static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = &dev->profile;
	void *set_hca_cap;
	int max_uc_list;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
		prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
	} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       prof->log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	if (MLX5_CAP_GEN(dev, roce_rw_supported))
		MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));

	max_uc_list = max_uc_list_get_devlink_param(dev);
	if (max_uc_list > 0)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
			 ilog2(max_uc_list));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}

/* Cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the
 * boot process.
 * In case RoCE cap is writable in FW and user/devlink requested to change the
 * cap, we are yet to query the final state of the above cap.
 * Hence, the need for this function.
 *
 * Returns
 * True:
 * 1) RoCE cap is read only in FW and already disabled
 * OR:
 * 2) RoCE cap is writable in FW and user/devlink requested it off.
 *
 * In any other case, return False.
 */
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
	return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
	       (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}

static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (is_roce_fw_disabled(dev))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
	if (err)
		return err;

	if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
	    !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
	       MLX5_ST_SZ_BYTES(roce_cap));
	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

	err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
	return err;
}

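/* Apply all capability adjustments at boot. A single SET_HCA_CAP input
 * buffer is allocated once and zeroed between the per-page handlers, since
 * every handler fills the whole capability union from scratch.
 */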
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_ctx;
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = handle_hca_cap(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_atomic(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_odp(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_roce(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_2(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
		goto out;
	}

out:
	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}

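/* ENABLE_HCA/DISABLE_HCA address a specific function: func_id 0 is the
 * caller itself, other IDs are used on behalf of VFs/SFs, with the
 * embedded_cpu_function bit qualifying the function-ID namespace.
 */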
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(disable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, disable_hca, in);
}

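/* Negotiate the ISSI (Interface Step Sequence ID) with FW: query the
 * supported-ISSI mask, prefer ISSI 1 when offered, and fall back to ISSI 0
 * when older FW does not support the query at all.
 */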
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
		u8 status = MLX5_GET(query_issi_out, query_out, status);

		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

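/* One-time construction of SW objects (tables, notifiers, debugfs) that live
 * for the whole lifetime of the mlx5_core_dev and survive mlx5_load() /
 * mlx5_unload() cycles; undone by mlx5_cleanup_once().
 */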
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_irq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize irq table\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_irq_cleanup;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_fw_reset_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize fw reset events\n");
		goto err_events_cleanup;
	}

	mlx5_cq_debugfs_init(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);
	dev->geneve = mlx5_geneve_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_sriov_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_vhca_event_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
		goto err_fpga_cleanup;
	}

	err = mlx5_sf_hw_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
		goto err_sf_hw_table_cleanup;
	}

	err = mlx5_sf_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF table %d\n", err);
		goto err_sf_table_cleanup;
	}

	err = mlx5_fs_core_alloc(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc flow steering\n");
		goto err_fs;
	}

	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory %d\n", err);

	dev->tracer = mlx5_fw_tracer_create(dev);
	dev->hv_vhca = mlx5_hv_vhca_create(dev);
	dev->rsc_dump = mlx5_rsc_dump_create(dev);

	return 0;

err_fs:
	mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
	mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
	mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
	mlx5_irq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_fs_core_free(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}

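/* Boot-time handshake with FW: wait out the pre-init phase, bring up the
 * command interface, enable the HCA, negotiate ISSI, feed boot/init pages,
 * set host endianness and capabilities, then INIT_HCA and start health
 * polling. The error path unwinds in reverse.
 */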
static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, timeout,
			   mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
	if (err) {
		mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
			      timeout);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	mlx5_tout_query_iseg(dev);

	err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
			      mlx5_tout_ms(dev, FW_INIT));
		goto err_cmd_cleanup;
	}

	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = mlx5_tout_query_dtor(dev);
	if (err) {
		mlx5_core_err(dev, "failed to read dtor\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);
	return 0;

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}

static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}

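/* Bring up the runtime resources (UAR, IRQs, EQs, tracer, steering, SFs,
 * embedded CPU, SR-IOV). The error path unwinds in exact reverse order;
 * mlx5_unload() mirrors it for the regular teardown.
 */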
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_irq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc IRQs\n");
		goto err_irq_table;
	}

	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
		mlx5_fw_tracer_destroy(dev->tracer);
		dev->tracer = NULL;
	}

	mlx5_fw_reset_events_start(dev);
	mlx5_hv_vhca_init(dev->hv_vhca);

	err = mlx5_rsc_dump_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
		mlx5_rsc_dump_destroy(dev);
		dev->rsc_dump = NULL;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	err = mlx5_fs_core_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);

	err = mlx5_sf_hw_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "sf table create failed %d\n", err);
		goto err_vhca;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	mlx5_lag_add_mdev(dev);
	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	mlx5_sf_dev_table_create(dev);

	return 0;

err_sriov:
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_fs_core_cleanup(dev);
err_fs:
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_irq_table_destroy(dev);
err_irq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	return err;
}

static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_eswitch_disable(dev->priv.eswitch);
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_fs_core_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}

int mlx5_init_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	int err = 0;

	devl_lock(devlink);
	mutex_lock(&dev->intf_state_mutex);
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev));
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
	return 0;

err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
	return err;
}

void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);

	devl_lock(devlink);
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
}

int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
	u64 timeout;
	int err = 0;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	if (recovery)
		timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
	else
		timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
	err = mlx5_function_setup(dev, timeout);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
{
	struct devlink *devlink = priv_to_devlink(dev);
	int ret;

	devl_lock(devlink);
	ret = mlx5_load_one_devl_locked(dev, recovery);
	devl_unlock(devlink);
	return ret;
}

void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&dev->intf_state_mutex);

	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}

void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);

	devl_lock(devlink);
	mlx5_unload_one_devl_locked(dev);
	devl_unlock(devlink);
}

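/* Capability pages cached in dev->caps.hca[]; one mlx5_hca_cap (current and
 * max unions) is allocated per type listed here.
 */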
static const int types[] = {
	MLX5_CAP_GENERAL,
	MLX5_CAP_GENERAL_2,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_DEV_EVENT,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION,
	MLX5_CAP_IPSEC,
	MLX5_CAP_PORT_SELECTION,
	MLX5_CAP_DEV_SHAMPO,
	MLX5_CAP_MACSEC,
};

static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
{
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		type = types[i];
		kfree(dev->caps.hca[type]);
	}
}

static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_hca_cap *cap;
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
		if (!cap)
			goto err;
		type = types[i];
		dev->caps.hca[type] = cap;
	}

	return 0;

err:
	mlx5_hca_caps_free(dev);
	return -ENOMEM;
}

int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	lockdep_register_key(&dev->lock_key);
	mutex_init(&dev->intf_state_mutex);
	lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
	priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
						mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_tout_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
		goto err_timeout_init;
	}

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	err = mlx5_hca_caps_alloc(dev);
	if (err)
		goto err_hca_caps;

	/* The conjunction of sw_vhca_id with sw_owner_id will be a global
	 * unique id per function which uses mlx5_core.
	 * Those values are supplied to FW as part of the init HCA command to
	 * be used by both driver and FW when it's applicable.
	 */
	dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
					       MAX_SW_VHCA_ID,
					       GFP_KERNEL);
	if (dev->priv.sw_vhca_id < 0)
		mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
			      dev->priv.sw_vhca_id);

	return 0;

err_hca_caps:
	mlx5_adev_cleanup(dev);
err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	mlx5_tout_cleanup(dev);
err_timeout_init:
	debugfs_remove(dev->priv.dbg.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	lockdep_unregister_key(&dev->lock_key);
	return err;
}

void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	if (priv->sw_vhca_id > 0)
		ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);

	mlx5_hca_caps_free(dev);
	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_tout_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	lockdep_unregister_key(&dev->lock_key);
}

static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc(&pdev->dev);
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	pci_save_state(pdev);
	devlink_register(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
	 * fw_reset before unregistering the devlink.
	 */
	mlx5_drain_fw_reset(dev);
	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
	devlink_unregister(devlink);
	mlx5_sriov_disable(pdev);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}

#define mlx5_pci_trace(dev, fmt, ...) ({ \
	struct mlx5_core_dev *__dev = (dev); \
	mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
		       __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
		       __dev->pci_status, ##__VA_ARGS__); \
})

static const char *result2str(enum pci_ers_result result)
{
	return  result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
		result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
		result == PCI_ERS_RESULT_RECOVERED  ? "recovered" :
		"unknown";
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	enum pci_ers_result res;

	mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	res = state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;

	mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
	return res;
}

/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				mlx5_core_info(dev,
					       "wait vital counter value 0x%x after %d iterations\n",
					       count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_pci_trace(dev, "Enter\n");

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		goto out;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = wait_vital(pdev);
	if (err) {
		mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
			      __func__, err);
		goto out;
	}

	res = PCI_ERS_RESULT_RECOVERED;
out:
	mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
	return res;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_pci_trace(dev, "Enter, loading driver..\n");

	err = mlx5_load_one(dev, false);

	mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
		       !err ? "recovered" : "Failed");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

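/* Try to tear down FW state without the full unload flow: prefer the fast
 * teardown command, fall back to force teardown, and on success just enter
 * the error state and free IRQs so a subsequent kexec can reallocate them.
 */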
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms require freeing the IRQs in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to clean up the mlx5_core software
	 * contexts.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}

static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unload_one(dev);

	return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return mlx5_load_one(dev, false);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0x1023) },			/* ConnectX-8 */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2df) },			/* BlueField-4 integrated ConnectX-8 network controller */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one_devl_locked(dev);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_sf(dev)) {
		mlx5_pci_disable_device(dev);
		if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
			return -EIO;
	}

	return mlx5_load_one_devl_locked(dev, true);
}

static struct pci_driver mlx5_core_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = probe_one,
	.remove         = remove_one,
	.suspend        = mlx5_suspend,
	.resume         = mlx5_resume,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};

/**
 * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device
 *                        if mlx5_core is its driver.
 * @pdev: The associated PCI device.
 *
 * Upon return the interface state lock stays held to let the caller use the
 * device safely. The caller must keep the returned mlx5 device for a narrow
 * window and put it back with mlx5_vf_put_core_dev() immediately once usage
 * is over.
 *
 * Return: Pointer to the associated mlx5_core_dev or NULL.
 */
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
{
	struct mlx5_core_dev *mdev;

	mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver);
	if (IS_ERR(mdev))
		return NULL;

	mutex_lock(&mdev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) {
		mutex_unlock(&mdev->intf_state_mutex);
		return NULL;
	}

	return mdev;
}
EXPORT_SYMBOL(mlx5_vf_get_core_dev);

/**
 * mlx5_vf_put_core_dev - Put the mlx5 core device back.
 * @mdev: The mlx5 core device.
 *
 * Upon return the interface state lock is unlocked and the caller should not
 * access the mdev any more.
 */
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
{
	mutex_unlock(&mdev->intf_state_mutex);
}
EXPORT_SYMBOL(mlx5_vf_put_core_dev);

static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}

static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

	err = mlx5e_init();
	if (err)
		goto err_en;

	return 0;

err_en:
	mlx5_sf_driver_unregister();
err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();

	return err;
}

static void __exit cleanup(void)
{
	mlx5e_cleanup();
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);